From 2df15fffc612b53b2c8e4ff3c981a82441bc00ae Mon Sep 17 00:00:00 2001
From: Herbert Xu
Date: Sun, 30 Oct 2005 20:44:37 +1100
Subject: [PADLOCK] Fix sparse warning about 1-bit signed bit-field

Change the bit-field in struct cword to unsigned to shut sparse up.

Signed-off-by: Herbert Xu
---
 drivers/crypto/padlock.h | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

(limited to 'drivers')

diff --git a/drivers/crypto/padlock.h b/drivers/crypto/padlock.h
index 3cf2b7a1234..b78489bc298 100644
--- a/drivers/crypto/padlock.h
+++ b/drivers/crypto/padlock.h
@@ -17,7 +17,7 @@
 
 /* Control word. */
 struct cword {
-	int __attribute__ ((__packed__))
+	unsigned int __attribute__ ((__packed__))
 		rounds:4,
 		algo:3,
 		keygen:1,
-- cgit v1.2.3
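For context on the warning: a signed 1-bit bit-field can only represent the values 0 and -1, so storing 1 in a field such as keygen:1 and later comparing it against 1 misbehaves. A small userspace sketch (struct and field names invented for illustration, not the kernel code) showing the difference:

/*
 * Minimal userspace demo: a signed 1-bit bit-field holds 0 and -1 only,
 * an unsigned one holds 0 and 1. Names are made up for illustration.
 */
#include <stdio.h>

struct cword_demo {
	int          s_keygen:1;	/* signed: values are 0 and -1 */
	unsigned int u_keygen:1;	/* unsigned: values are 0 and 1 */
};

int main(void)
{
	struct cword_demo c = { 0, 0 };

	c.s_keygen = 1;
	c.u_keygen = 1;

	/* On common ABIs this prints: signed: -1 (== 1? no), unsigned: 1 (== 1? yes) */
	printf("signed: %d (== 1? %s), unsigned: %u (== 1? %s)\n",
	       c.s_keygen, c.s_keygen == 1 ? "yes" : "no",
	       c.u_keygen, c.u_keygen == 1 ? "yes" : "no");
	return 0;
}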
From 06ace7a9bafeb9047352707eb79e8eaa0dfdf5f2 Mon Sep 17 00:00:00 2001
From: Herbert Xu
Date: Sun, 30 Oct 2005 21:25:15 +1100
Subject: [CRYPTO] Use standard byte order macros wherever possible

A lot of crypto code needs to read/write 32-bit or 64-bit words in a
specific byte order. Many implementations open-code this by reading or
writing one byte at a time. This patch converts all the applicable
usages over to the standard byte order macros.

This is based on a previous patch by Denis Vlasenko.

Signed-off-by: Herbert Xu
---
 drivers/crypto/padlock-aes.c | 24 +++++++++++-------------
 1 file changed, 11 insertions(+), 13 deletions(-)

(limited to 'drivers')

diff --git a/drivers/crypto/padlock-aes.c b/drivers/crypto/padlock-aes.c
index 71407c578af..963e03dcb1b 100644
--- a/drivers/crypto/padlock-aes.c
+++ b/drivers/crypto/padlock-aes.c
@@ -99,9 +99,6 @@ byte(const uint32_t x, const unsigned n)
 	return x >> (n << 3);
 }
 
-#define uint32_t_in(x)	le32_to_cpu(*(const uint32_t *)(x))
-#define uint32_t_out(to, from)	(*(uint32_t *)(to) = cpu_to_le32(from))
-
 #define E_KEY ctx->E
 #define D_KEY ctx->D
 
@@ -294,6 +291,7 @@ static int
 aes_set_key(void *ctx_arg, const uint8_t *in_key, unsigned int key_len, uint32_t *flags)
 {
 	struct aes_ctx *ctx = aes_ctx(ctx_arg);
+	const __le32 *key = (const __le32 *)in_key;
 	uint32_t i, t, u, v, w;
 	uint32_t P[AES_EXTENDED_KEY_SIZE];
 	uint32_t rounds;
@@ -313,10 +311,10 @@ aes_set_key(void *ctx_arg, const uint8_t *in_key, unsigned int key_len, uint32_t
 	ctx->E = ctx->e_data;
 	ctx->D = ctx->e_data;
 
-	E_KEY[0] = uint32_t_in (in_key);
-	E_KEY[1] = uint32_t_in (in_key + 4);
-	E_KEY[2] = uint32_t_in (in_key + 8);
-	E_KEY[3] = uint32_t_in (in_key + 12);
+	E_KEY[0] = le32_to_cpu(key[0]);
+	E_KEY[1] = le32_to_cpu(key[1]);
+	E_KEY[2] = le32_to_cpu(key[2]);
+	E_KEY[3] = le32_to_cpu(key[3]);
 
 	/* Prepare control words. */
 	memset(&ctx->cword, 0, sizeof(ctx->cword));
@@ -343,17 +341,17 @@ aes_set_key(void *ctx_arg, const uint8_t *in_key, unsigned int key_len, uint32_t
 		break;
 
 	case 24:
-		E_KEY[4] = uint32_t_in (in_key + 16);
-		t = E_KEY[5] = uint32_t_in (in_key + 20);
+		E_KEY[4] = le32_to_cpu(key[4]);
+		t = E_KEY[5] = le32_to_cpu(key[5]);
 		for (i = 0; i < 8; ++i)
 			loop6 (i);
 		break;
 
 	case 32:
-		E_KEY[4] = uint32_t_in (in_key + 16);
-		E_KEY[5] = uint32_t_in (in_key + 20);
-		E_KEY[6] = uint32_t_in (in_key + 24);
-		t = E_KEY[7] = uint32_t_in (in_key + 28);
+		E_KEY[4] = le32_to_cpu(key[4]);
+		E_KEY[5] = le32_to_cpu(key[5]);
+		E_KEY[6] = le32_to_cpu(key[6]);
+		t = E_KEY[7] = le32_to_cpu(key[7]);
 		for (i = 0; i < 7; ++i)
 			loop8 (i);
 		break;
-- cgit v1.2.3

From c8a19c91b5b488fed8cce04200a84c6a35c0bf0c Mon Sep 17 00:00:00 2001
From: Herbert Xu
Date: Sat, 5 Nov 2005 18:06:26 +1100
Subject: [CRYPTO] Allow AES C/ASM implementations to coexist

As the Crypto API now allows multiple implementations to be registered
for the same algorithm, we no longer have to play tricks with Kconfig
to select the right AES implementation.

This patch sets the driver name and priority for all the AES
implementations and removes the Kconfig conditions on the C
implementation for AES.

Signed-off-by: Herbert Xu
---
 drivers/crypto/padlock-aes.c | 2 ++
 1 file changed, 2 insertions(+)

(limited to 'drivers')

diff --git a/drivers/crypto/padlock-aes.c b/drivers/crypto/padlock-aes.c
index 963e03dcb1b..64819aa7cac 100644
--- a/drivers/crypto/padlock-aes.c
+++ b/drivers/crypto/padlock-aes.c
@@ -466,6 +466,8 @@ static unsigned int aes_decrypt_cbc(const struct cipher_desc *desc, u8 *out,
 
 static struct crypto_alg aes_alg = {
 	.cra_name		=	"aes",
+	.cra_driver_name	=	"aes-padlock",
+	.cra_priority		=	300,
 	.cra_flags		=	CRYPTO_ALG_TYPE_CIPHER,
 	.cra_blocksize		=	AES_BLOCK_SIZE,
 	.cra_ctxsize		=	sizeof(struct aes_ctx),
-- cgit v1.2.3

From c1854ebc7f13b23c3d6a6e641a1a1db1116ca998 Mon Sep 17 00:00:00 2001
From: Ralf Baechle
Date: Sun, 8 Jan 2006 22:31:04 -0800
Subject: [AX25] mkiss: Drop spinlock before sleeping call.

With the previous missing-unlock fix, the spinlock is dropped only after
the tty->driver->write() call, which might sleep. Drop the lock before
the write instead, so it is not held across a call that can sleep.

Signed-off-by: Ralf Baechle
Signed-off-by: David S. Miller
---
 drivers/net/hamradio/mkiss.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

(limited to 'drivers')

diff --git a/drivers/net/hamradio/mkiss.c b/drivers/net/hamradio/mkiss.c
index 41b3d83c2ab..f4424cf886c 100644
--- a/drivers/net/hamradio/mkiss.c
+++ b/drivers/net/hamradio/mkiss.c
@@ -515,6 +515,7 @@ static void ax_encaps(struct net_device *dev, unsigned char *icp, int len)
 			count = kiss_esc(p, (unsigned char *)ax->xbuff, len);
 		}
 	}
+	spin_unlock_bh(&ax->buflock);
 
 	set_bit(TTY_DO_WRITE_WAKEUP, &ax->tty->flags);
 	actual = ax->tty->driver->write(ax->tty, ax->xbuff, count);
@@ -524,7 +525,6 @@ static void ax_encaps(struct net_device *dev, unsigned char *icp, int len)
 	ax->dev->trans_start = jiffies;
 	ax->xleft = count - actual;
 	ax->xhead = ax->xbuff + actual;
-	spin_unlock_bh(&ax->buflock);
 }
 
 /* Encapsulate an AX.25 packet and kick it into a TTY queue. */
-- cgit v1.2.3
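The rule behind the mkiss fix applies generally: a spinlock, in particular the _bh variant taken here, must not be held across a call that can sleep, such as a tty driver's write(). A minimal sketch of the resulting shape (invented names, not the actual mkiss code): do the buffer work under the lock, drop the lock, and only then make the potentially sleeping call.

/*
 * Sketch of the locking pattern only; names and buffer size are invented.
 * Build the output under the lock, release the lock, then call into code
 * that may sleep.
 */
#include <linux/errno.h>
#include <linux/spinlock.h>
#include <linux/string.h>

struct xmit_state {
	spinlock_t	buflock;	/* protects xbuff/xlen */
	unsigned char	xbuff[256];
	int		xlen;
};

static int send_frame(struct xmit_state *st,
		      int (*write_may_sleep)(const unsigned char *, int),
		      const unsigned char *frame, int len)
{
	if (len > (int)sizeof(st->xbuff))
		return -EINVAL;

	spin_lock_bh(&st->buflock);
	memcpy(st->xbuff, frame, len);	/* encode/copy under the lock */
	st->xlen = len;
	spin_unlock_bh(&st->buflock);	/* unlock before the sleeping call */

	return write_may_sleep(st->xbuff, len);
}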
From 253af4235d24ddfcd9f5403485e9273b33d8fa5e Mon Sep 17 00:00:00 2001
From: Jamal Hadi Salim
Date: Sun, 8 Jan 2006 22:34:25 -0800
Subject: [NET]: Add IFB (Intermediate Functional Block) network device.

A new device to provide an intermediate functional block in a
system-shared manner. To use the new functionality, you need to turn on
qos/classifier actions.

The new functionality can be grouped as:

1) qdiscs/policies that are per device as opposed to system wide.
   ifb allows for a device which can be redirected to, thus providing
   an impression of sharing.

2) Allows for queueing incoming traffic for shaping instead of
   dropping.

Packets are redirected to this device using the tc/action mirred
redirect construct. If they are sent to it by plain routing instead,
they will merely be dropped and the stats will indicate that.

Signed-off-by: Jamal Hadi Salim
Signed-off-by: David S. Miller
---
 drivers/net/Kconfig  |  13 +++
 drivers/net/Makefile |   1 +
 drivers/net/ifb.c    | 294 +++++++++++++++++++++++++++++++++++++++++++++++++++
 3 files changed, 308 insertions(+)
 create mode 100644 drivers/net/ifb.c

(limited to 'drivers')

diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig
index 733bc25b2bf..4959800a18d 100644
--- a/drivers/net/Kconfig
+++ b/drivers/net/Kconfig
@@ -27,6 +27,19 @@ config NETDEVICES
 # that for each of the symbols.
 if NETDEVICES
 
+config IFB
+	tristate "Intermediate Functional Block support"
+	depends on NET_CLS_ACT
+	---help---
+	  This is an intermediate driver that allows sharing of
+	  resources.
+	  To compile this driver as a module, choose M here: the module
+	  will be called ifb.  If you want to use more than one ifb
+	  device at a time, you need to compile this driver as a module.
+	  Instead of 'ifb', the devices will then be called 'ifb0',
+	  'ifb1' etc.
+	  Look at the iproute2 documentation directory for usage etc.
+
 config DUMMY
 	tristate "Dummy net driver support"
 	---help---

diff --git a/drivers/net/Makefile b/drivers/net/Makefile
index b74a7cb5bae..00e72b12fb9 100644
--- a/drivers/net/Makefile
+++ b/drivers/net/Makefile
@@ -125,6 +125,7 @@ ifeq ($(CONFIG_SLIP_COMPRESSED),y)
 endif
 
 obj-$(CONFIG_DUMMY) += dummy.o
+obj-$(CONFIG_IFB) += ifb.o
 obj-$(CONFIG_DE600) += de600.o
 obj-$(CONFIG_DE620) += de620.o
 obj-$(CONFIG_LANCE) += lance.o
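Before the driver source that follows, a note on how packets actually reach the device: the "tc/action mirred redirect" mentioned in the commit message is configured from userspace; inside the kernel it amounts to retargeting the skb at the ifb device and handing it to that device's transmit path, after which ifb queues the packet and later reinjects it. A hedged sketch of that hand-off (simplified, with an invented function name; the real work is done by the mirred action, not by code like this):

/*
 * Conceptual sketch only, not the act_mirred implementation: point the
 * skb at the ifb device and transmit it there; it then flows through
 * ifb's qdisc and ifb_xmit() below.
 */
#include <linux/netdevice.h>
#include <linux/skbuff.h>

static int redirect_to_ifb(struct sk_buff *skb, struct net_device *ifb_dev)
{
	skb->dev = ifb_dev;		/* retarget the packet at ifb */
	return dev_queue_xmit(skb);	/* queue on ifb; reinjected later */
}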
diff --git a/drivers/net/ifb.c b/drivers/net/ifb.c
new file mode 100644
index 00000000000..1b699259b4e
--- /dev/null
+++ b/drivers/net/ifb.c
@@ -0,0 +1,294 @@
+/* drivers/net/ifb.c:
+
+	The purpose of this driver is to provide a device that allows
+	for sharing of resources:
+
+	1) qdiscs/policies that are per device as opposed to system wide.
+	ifb allows for a device which can be redirected to thus providing
+	an impression of sharing.
+
+	2) Allows for queueing incoming traffic for shaping instead of
+	dropping.
+
+	The original concept is based on what is known as the IMQ
+	driver initially written by Martin Devera, later rewritten
+	by Patrick McHardy and then maintained by Andre Correa.
+
+	You need the tc action mirror or redirect to feed this device
+	packets.
+
+	This program is free software; you can redistribute it and/or
+	modify it under the terms of the GNU General Public License
+	as published by the Free Software Foundation; either version
+	2 of the License, or (at your option) any later version.
+
+	Authors:	Jamal Hadi Salim (2005)
+
+*/
+
+
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+
+#define TX_TIMEOUT  (2*HZ)
+
+#define TX_Q_LIMIT  32
+struct ifb_private {
+	struct net_device_stats stats;
+	struct tasklet_struct   ifb_tasklet;
+	int     tasklet_pending;
+	/* mostly debug stats leave in for now */
+	unsigned long   st_task_enter;   /* tasklet entered */
+	unsigned long   st_txq_refl_try; /* transmit queue refill attempt */
+	unsigned long   st_rxq_enter;    /* receive queue entered */
+	unsigned long   st_rx2tx_tran;   /* receive to transmit transfers */
+	unsigned long   st_rxq_notenter; /* receiveQ not entered, resched */
+	unsigned long   st_rx_frm_egr;   /* received from egress path */
+	unsigned long   st_rx_frm_ing;   /* received from ingress path */
+	unsigned long   st_rxq_check;
+	unsigned long   st_rxq_rsch;
+	struct sk_buff_head     rq;
+	struct sk_buff_head     tq;
+};
+
+static int numifbs = 1;
+
+static void ri_tasklet(unsigned long dev);
+static int ifb_xmit(struct sk_buff *skb, struct net_device *dev);
+static struct net_device_stats *ifb_get_stats(struct net_device *dev);
+static int ifb_open(struct net_device *dev);
+static int ifb_close(struct net_device *dev);
+
+static void ri_tasklet(unsigned long dev)
+{
+
+	struct net_device *_dev = (struct net_device *)dev;
+	struct ifb_private *dp = netdev_priv(_dev);
+	struct net_device_stats *stats = &dp->stats;
+	struct sk_buff *skb;
+
+	dp->st_task_enter++;
+	if ((skb = skb_peek(&dp->tq)) == NULL) {
+		dp->st_txq_refl_try++;
+		if (spin_trylock(&_dev->xmit_lock)) {
+			dp->st_rxq_enter++;
+			while ((skb = skb_dequeue(&dp->rq)) != NULL) {
+				skb_queue_tail(&dp->tq, skb);
+				dp->st_rx2tx_tran++;
+			}
+			spin_unlock(&_dev->xmit_lock);
+		} else {
+			/* reschedule */
+			dp->st_rxq_notenter++;
+			goto resched;
+		}
+	}
+
+	while ((skb = skb_dequeue(&dp->tq)) != NULL) {
+		u32 from = G_TC_FROM(skb->tc_verd);
+
+		skb->tc_verd = 0;
+		skb->tc_verd = SET_TC_NCLS(skb->tc_verd);
+		stats->tx_packets++;
+		stats->tx_bytes += skb->len;
+		if (from & AT_EGRESS) {
+			dp->st_rx_frm_egr++;
+			dev_queue_xmit(skb);
+		} else if (from & AT_INGRESS) {
+			dp->st_rx_frm_ing++;
+			netif_rx(skb);
+		} else {
+			dev_kfree_skb(skb);
+			stats->tx_dropped++;
+		}
+	}
+
+	if (spin_trylock(&_dev->xmit_lock)) {
+		dp->st_rxq_check++;
+		if ((skb = skb_peek(&dp->rq)) == NULL) {
+			dp->tasklet_pending = 0;
+			if (netif_queue_stopped(_dev))
+				netif_wake_queue(_dev);
+		} else {
+			dp->st_rxq_rsch++;
+			spin_unlock(&_dev->xmit_lock);
+			goto resched;
+		}
+		spin_unlock(&_dev->xmit_lock);
+	} else {
+resched:
+		dp->tasklet_pending = 1;
+		tasklet_schedule(&dp->ifb_tasklet);
+	}
+
+}
+
+static void __init ifb_setup(struct net_device *dev)
+{
+	/* Initialize the device structure. */
+	dev->get_stats = ifb_get_stats;
+	dev->hard_start_xmit = ifb_xmit;
+	dev->open = &ifb_open;
+	dev->stop = &ifb_close;
+
+	/* Fill in device structure with ethernet-generic values. */
+	ether_setup(dev);
+	dev->tx_queue_len = TX_Q_LIMIT;
+	dev->change_mtu = NULL;
+	dev->flags |= IFF_NOARP;
+	dev->flags &= ~IFF_MULTICAST;
+	SET_MODULE_OWNER(dev);
+	random_ether_addr(dev->dev_addr);
+}
+
+static int ifb_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+	struct ifb_private *dp = netdev_priv(dev);
+	struct net_device_stats *stats = &dp->stats;
+	int ret = 0;
+	u32 from = G_TC_FROM(skb->tc_verd);
+
+	stats->tx_packets++;
+	stats->tx_bytes += skb->len;
+
+	if (!from || !skb->input_dev) {
+dropped:
+		dev_kfree_skb(skb);
+		stats->rx_dropped++;
+		return ret;
+	} else {
+		/*
+		 * note we could be going
+		 * ingress -> egress or
+		 * egress -> ingress
+		 */
+		skb->dev = skb->input_dev;
+		skb->input_dev = dev;
+		if (from & AT_INGRESS) {
+			skb_pull(skb, skb->dev->hard_header_len);
+		} else {
+			if (!(from & AT_EGRESS)) {
+				goto dropped;
+			}
+		}
+	}
+
+	if (skb_queue_len(&dp->rq) >= dev->tx_queue_len) {
+		netif_stop_queue(dev);
+	}
+
+	dev->trans_start = jiffies;
+	skb_queue_tail(&dp->rq, skb);
+	if (!dp->tasklet_pending) {
+		dp->tasklet_pending = 1;
+		tasklet_schedule(&dp->ifb_tasklet);
+	}
+
+	return ret;
+}
+
+static struct net_device_stats *ifb_get_stats(struct net_device *dev)
+{
+	struct ifb_private *dp = netdev_priv(dev);
+	struct net_device_stats *stats = &dp->stats;
+
+	pr_debug("tasklets stats %ld:%ld:%ld:%ld:%ld:%ld:%ld:%ld:%ld \n",
+		dp->st_task_enter, dp->st_txq_refl_try, dp->st_rxq_enter,
+		dp->st_rx2tx_tran, dp->st_rxq_notenter, dp->st_rx_frm_egr,
+		dp->st_rx_frm_ing, dp->st_rxq_check, dp->st_rxq_rsch);
+
+	return stats;
+}
+
+static struct net_device **ifbs;
+
+/* Number of ifb devices to be set up by this module. */
+module_param(numifbs, int, 0);
+MODULE_PARM_DESC(numifbs, "Number of ifb devices");
+
+static int ifb_close(struct net_device *dev)
+{
+	struct ifb_private *dp = netdev_priv(dev);
+
+	tasklet_kill(&dp->ifb_tasklet);
+	netif_stop_queue(dev);
+	skb_queue_purge(&dp->rq);
+	skb_queue_purge(&dp->tq);
+	return 0;
+}
+
+static int ifb_open(struct net_device *dev)
+{
+	struct ifb_private *dp = netdev_priv(dev);
+
+	tasklet_init(&dp->ifb_tasklet, ri_tasklet, (unsigned long)dev);
+	skb_queue_head_init(&dp->rq);
+	skb_queue_head_init(&dp->tq);
+	netif_start_queue(dev);
+
+	return 0;
+}
+
+static int __init ifb_init_one(int index)
+{
+	struct net_device *dev_ifb;
+	int err;
+
+	dev_ifb = alloc_netdev(sizeof(struct ifb_private),
+				 "ifb%d", ifb_setup);
+
+	if (!dev_ifb)
+		return -ENOMEM;
+
+	if ((err = register_netdev(dev_ifb))) {
+		free_netdev(dev_ifb);
+		dev_ifb = NULL;
+	} else {
+		ifbs[index] = dev_ifb;
+	}
+
+	return err;
+}
+
+static void ifb_free_one(int index)
+{
+	unregister_netdev(ifbs[index]);
+	free_netdev(ifbs[index]);
+}
+
+static int __init ifb_init_module(void)
+{
+	int i, err = 0;
+	ifbs = kmalloc(numifbs * sizeof(void *), GFP_KERNEL);
+	if (!ifbs)
+		return -ENOMEM;
+	for (i = 0; i < numifbs && !err; i++)
+		err = ifb_init_one(i);
+	if (err) {
+		while (--i >= 0)
+			ifb_free_one(i);
+	}
+
+	return err;
+}
+
+static void __exit ifb_cleanup_module(void)
+{
+	int i;
+
+	for (i = 0; i < numifbs; i++)
+		ifb_free_one(i);
+	kfree(ifbs);
+}
+
+module_init(ifb_init_module);
+module_exit(ifb_cleanup_module);
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Jamal Hadi Salim");
-- cgit v1.2.3
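The core of the driver is the pairing of a cheap transmit hook that only enqueues, as in ifb_xmit() above, with a tasklet that drains the queue later, as in ri_tasklet(). A condensed sketch of that deferral pattern, with invented names and only the ingress reinjection case (the real ri_tasklet() also handles the egress case and the xmit_lock handshake):

/*
 * Condensed sketch of the enqueue-then-drain pattern; names are invented
 * and this is not a drop-in replacement for the ifb code above.
 */
#include <linux/interrupt.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>

struct deferred_path {
	struct sk_buff_head	rq;	/* filled by the xmit path */
	struct tasklet_struct	task;	/* drains rq later */
	int			pending;
};

static void drain_tasklet(unsigned long data)
{
	struct deferred_path *dp = (struct deferred_path *)data;
	struct sk_buff *skb;

	while ((skb = skb_dequeue(&dp->rq)) != NULL)
		netif_rx(skb);		/* reinject; ifb also handles egress */
	dp->pending = 0;
}

static void deferred_path_init(struct deferred_path *dp)
{
	skb_queue_head_init(&dp->rq);
	tasklet_init(&dp->task, drain_tasklet, (unsigned long)dp);
	dp->pending = 0;
}

static void enqueue_deferred(struct deferred_path *dp, struct sk_buff *skb)
{
	skb_queue_tail(&dp->rq, skb);	/* cheap work in the hot path */
	if (!dp->pending) {		/* schedule the drain once */
		dp->pending = 1;
		tasklet_schedule(&dp->task);
	}
}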