Diffstat (limited to 'drivers/net')
-rw-r--r--drivers/net/Kconfig57
-rw-r--r--drivers/net/Makefile4
-rw-r--r--drivers/net/arm/ether1.c3
-rw-r--r--drivers/net/arm/ether3.c5
-rw-r--r--drivers/net/arm/etherh.c1
-rw-r--r--drivers/net/b44.c3
-rw-r--r--drivers/net/bfin_mac.c1009
-rw-r--r--drivers/net/bfin_mac.h132
-rw-r--r--drivers/net/bnx2.c103
-rw-r--r--drivers/net/bnx2.h10
-rw-r--r--drivers/net/bsd_comp.c3
-rw-r--r--drivers/net/ehea/ehea.h2
-rw-r--r--drivers/net/ehea/ehea_main.c37
-rw-r--r--drivers/net/forcedeth.c6
-rw-r--r--drivers/net/gianfar.c12
-rw-r--r--drivers/net/hamradio/baycom_epp.c2
-rw-r--r--drivers/net/hamradio/dmascc.c6
-rw-r--r--drivers/net/irda/Kconfig11
-rw-r--r--drivers/net/irda/Makefile1
-rw-r--r--drivers/net/irda/ep7211-sir.c89
-rw-r--r--drivers/net/irda/irport.c3
-rw-r--r--drivers/net/irda/irtty-sir.c3
-rw-r--r--drivers/net/iseries_veth.c6
-rw-r--r--drivers/net/lance.c3
-rw-r--r--drivers/net/lguest_net.c354
-rw-r--r--drivers/net/mac89x0.c2
-rw-r--r--drivers/net/mlx4/catas.c106
-rw-r--r--drivers/net/mlx4/eq.c56
-rw-r--r--drivers/net/mlx4/intf.c2
-rw-r--r--drivers/net/mlx4/main.c26
-rw-r--r--drivers/net/mlx4/mlx4.h13
-rw-r--r--drivers/net/ni5010.c6
-rw-r--r--drivers/net/ns83820.c2
-rw-r--r--drivers/net/pcmcia/com20020_cs.c3
-rw-r--r--drivers/net/pcmcia/ibmtr_cs.c3
-rw-r--r--drivers/net/phy/vitesse.c46
-rw-r--r--drivers/net/ppp_async.c3
-rw-r--r--drivers/net/ppp_deflate.c6
-rw-r--r--drivers/net/ppp_generic.c3
-rw-r--r--drivers/net/ppp_mppe.c3
-rw-r--r--drivers/net/ppp_synctty.c3
-rw-r--r--drivers/net/pppol2tp.c18
-rw-r--r--drivers/net/saa9730.c9
-rw-r--r--drivers/net/shaper.c3
-rw-r--r--drivers/net/sky2.c7
-rw-r--r--drivers/net/sunvnet.c399
-rw-r--r--drivers/net/sunvnet.h15
-rw-r--r--drivers/net/tc35815.c2
-rw-r--r--drivers/net/tg3.c114
-rw-r--r--drivers/net/tg3.h1
-rw-r--r--drivers/net/wan/c101.c3
-rw-r--r--drivers/net/wan/cosa.c4
-rw-r--r--drivers/net/wan/cycx_main.c4
-rw-r--r--drivers/net/wan/cycx_x25.c3
-rw-r--r--drivers/net/wan/dscc4.c6
-rw-r--r--drivers/net/wan/farsync.c3
-rw-r--r--drivers/net/wan/hostess_sv11.c3
-rw-r--r--drivers/net/wan/n2.c3
-rw-r--r--drivers/net/wan/pc300_drv.c3
-rw-r--r--drivers/net/wan/pc300too.c3
-rw-r--r--drivers/net/wan/pci200syn.c3
-rw-r--r--drivers/net/wan/sdla.c3
-rw-r--r--drivers/net/wan/sealevel.c3
-rw-r--r--drivers/net/wan/wanxl.c3
-rw-r--r--drivers/net/wan/x25_asy.c4
-rw-r--r--drivers/net/wireless/ipw2100.c6
-rw-r--r--drivers/net/wireless/ipw2200.c18
-rw-r--r--drivers/net/wireless/zd1211rw/zd_usb.c2
-rw-r--r--drivers/net/xen-netfront.c1863
69 files changed, 4218 insertions, 438 deletions
diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig
index 43d03178064..f8a602caabc 100644
--- a/drivers/net/Kconfig
+++ b/drivers/net/Kconfig
@@ -5,6 +5,7 @@
menuconfig NETDEVICES
default y if UML
+ depends on NET
bool "Network device support"
---help---
You can say N here if you don't intend to connect your Linux box to
@@ -838,6 +839,50 @@ config ULTRA32
<file:Documentation/networking/net-modules.txt>. The module
will be called smc-ultra32.
+config BFIN_MAC
+ tristate "Blackfin 536/537 on-chip mac support"
+ depends on NET_ETHERNET && (BF537 || BF536) && (!BF537_PORT_H)
+ select CRC32
+ select BFIN_MAC_USE_L1 if DMA_UNCACHED_NONE
+ help
+ This is the driver for the Blackfin on-chip MAC device. Say Y if you want it
+ compiled into the kernel. This driver is also available as a module
+ ( = code which can be inserted in and removed from the running kernel
+ whenever you want). The module will be called bfin_mac.
+
+config BFIN_MAC_USE_L1
+ bool "Use L1 memory for rx/tx packets"
+ depends on BFIN_MAC && BF537
+ default y
+ help
+ To get maximum network performance, you should use L1 memory as rx/tx buffers.
+ Say N here if you want to reserve L1 memory for other uses.
+
+config BFIN_TX_DESC_NUM
+ int "Number of transmit buffer packets"
+ depends on BFIN_MAC
+ range 6 10 if BFIN_MAC_USE_L1
+ range 10 100
+ default "10"
+ help
+ Set the number of transmit buffer packets used in the driver.
+
+config BFIN_RX_DESC_NUM
+ int "Number of receive buffer packets"
+ depends on BFIN_MAC
+ range 20 100 if BFIN_MAC_USE_L1
+ range 20 800
+ default "20"
+ help
+ Set the number of receive buffer packets used in the driver.
+
+config BFIN_MAC_RMII
+ bool "RMII PHY Interface (EXPERIMENTAL)"
+ depends on BFIN_MAC && EXPERIMENTAL
+ default n
+ help
+ Use the Reduced MII (RMII) PHY interface.
+
config SMC9194
tristate "SMC 9194 support"
depends on NET_VENDOR_SMC && (ISA || MAC && BROKEN)
@@ -2486,6 +2531,18 @@ source "drivers/atm/Kconfig"
source "drivers/s390/net/Kconfig"
+config XEN_NETDEV_FRONTEND
+ tristate "Xen network device frontend driver"
+ depends on XEN
+ default y
+ help
+ The network device frontend driver allows the kernel to
+ access network devices exported by a virtual
+ machine containing a physical network device driver. The
+ frontend driver is intended for unprivileged guest domains;
+ if you are compiling a kernel for a Xen guest, you almost
+ certainly want to enable this.
+
config ISERIES_VETH
tristate "iSeries Virtual Ethernet driver support"
depends on PPC_ISERIES
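The descriptor-count options above map directly onto memory use: desc_list_init() in bfin_mac.c (added later in this patch) allocates each ring as a single array, and with BFIN_MAC_USE_L1 both arrays must fit in the small on-chip L1 data SRAM, which is why those ranges are tighter. A minimal sketch of that arithmetic, assuming the structure layouts from bfin_mac.h (illustrative, not part of the patch):

#include <linux/kernel.h>
#include "bfin_mac.h"	/* struct net_dma_desc_tx/rx, added by this patch */

/* The ring footprints that the Kconfig ranges are bounding. Each tx
 * slot carries a 1560-byte bounce buffer, so the tx ring dominates
 * the L1 budget. */
static void bfin_mac_ring_footprint(void)
{
	size_t tx = sizeof(struct net_dma_desc_tx) * CONFIG_BFIN_TX_DESC_NUM;
	size_t rx = sizeof(struct net_dma_desc_rx) * CONFIG_BFIN_RX_DESC_NUM;

	pr_debug("bfin_mac: tx ring %zu bytes, rx ring %zu bytes\n", tx, rx);
}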
diff --git a/drivers/net/Makefile b/drivers/net/Makefile
index eb4167622a6..336af0635df 100644
--- a/drivers/net/Makefile
+++ b/drivers/net/Makefile
@@ -127,6 +127,8 @@ obj-$(CONFIG_PPPOL2TP) += pppox.o pppol2tp.o
obj-$(CONFIG_SLIP) += slip.o
obj-$(CONFIG_SLHC) += slhc.o
+obj-$(CONFIG_XEN_NETDEV_FRONTEND) += xen-netfront.o
+
obj-$(CONFIG_DUMMY) += dummy.o
obj-$(CONFIG_IFB) += ifb.o
obj-$(CONFIG_MACVLAN) += macvlan.o
@@ -175,6 +177,7 @@ obj-$(CONFIG_ZORRO8390) += zorro8390.o
obj-$(CONFIG_HPLANCE) += hplance.o 7990.o
obj-$(CONFIG_MVME147_NET) += mvme147.o 7990.o
obj-$(CONFIG_EQUALIZER) += eql.o
+obj-$(CONFIG_LGUEST_GUEST) += lguest_net.o
obj-$(CONFIG_MIPS_JAZZ_SONIC) += jazzsonic.o
obj-$(CONFIG_MIPS_AU1X00_ENET) += au1000_eth.o
obj-$(CONFIG_MIPS_SIM_NET) += mipsnet.o
@@ -198,6 +201,7 @@ obj-$(CONFIG_S2IO) += s2io.o
obj-$(CONFIG_MYRI10GE) += myri10ge/
obj-$(CONFIG_SMC91X) += smc91x.o
obj-$(CONFIG_SMC911X) += smc911x.o
+obj-$(CONFIG_BFIN_MAC) += bfin_mac.o
obj-$(CONFIG_DM9000) += dm9000.o
obj-$(CONFIG_FEC_8XX) += fec_8xx/
obj-$(CONFIG_PASEMI_MAC) += pasemi_mac.o
diff --git a/drivers/net/arm/ether1.c b/drivers/net/arm/ether1.c
index f21148e7b57..80f33b6d571 100644
--- a/drivers/net/arm/ether1.c
+++ b/drivers/net/arm/ether1.c
@@ -36,7 +36,6 @@
#include <linux/types.h>
#include <linux/fcntl.h>
#include <linux/interrupt.h>
-#include <linux/ptrace.h>
#include <linux/ioport.h>
#include <linux/in.h>
#include <linux/slab.h>
@@ -75,7 +74,7 @@ static void ether1_timeout(struct net_device *dev);
/* ------------------------------------------------------------------------- */
-static char version[] __initdata = "ether1 ethernet driver (c) 2000 Russell King v1.07\n";
+static char version[] __devinitdata = "ether1 ethernet driver (c) 2000 Russell King v1.07\n";
#define BUS_16 16
#define BUS_8 8
diff --git a/drivers/net/arm/ether3.c b/drivers/net/arm/ether3.c
index da713500654..3805506a3ab 100644
--- a/drivers/net/arm/ether3.c
+++ b/drivers/net/arm/ether3.c
@@ -51,7 +51,6 @@
#include <linux/types.h>
#include <linux/fcntl.h>
#include <linux/interrupt.h>
-#include <linux/ptrace.h>
#include <linux/ioport.h>
#include <linux/in.h>
#include <linux/slab.h>
@@ -69,7 +68,7 @@
#include <asm/ecard.h>
#include <asm/io.h>
-static char version[] __initdata = "ether3 ethernet driver (c) 1995-2000 R.M.King v1.17\n";
+static char version[] __devinitdata = "ether3 ethernet driver (c) 1995-2000 R.M.King v1.17\n";
#include "ether3.h"
@@ -464,7 +463,7 @@ static void ether3_setmulticastlist(struct net_device *dev)
if (dev->flags & IFF_PROMISC) {
/* promiscuous mode */
priv(dev)->regs.config1 |= CFG1_RECVPROMISC;
- } else if (dev->flags & IFF_ALLMULTI) {
+ } else if (dev->flags & IFF_ALLMULTI || dev->mc_count) {
priv(dev)->regs.config1 |= CFG1_RECVSPECBRMULTI;
} else
priv(dev)->regs.config1 |= CFG1_RECVSPECBROAD;
diff --git a/drivers/net/arm/etherh.c b/drivers/net/arm/etherh.c
index 769ba69451f..0d37d9d1fd7 100644
--- a/drivers/net/arm/etherh.c
+++ b/drivers/net/arm/etherh.c
@@ -31,7 +31,6 @@
#include <linux/types.h>
#include <linux/fcntl.h>
#include <linux/interrupt.h>
-#include <linux/ptrace.h>
#include <linux/ioport.h>
#include <linux/in.h>
#include <linux/slab.h>
diff --git a/drivers/net/b44.c b/drivers/net/b44.c
index 96fb0ec905a..37f1b6ff5c1 100644
--- a/drivers/net/b44.c
+++ b/drivers/net/b44.c
@@ -1519,14 +1519,13 @@ static void b44_setup_pseudo_magicp(struct b44 *bp)
u8 *pwol_pattern;
u8 pwol_mask[B44_PMASK_SIZE];
- pwol_pattern = kmalloc(B44_PATTERN_SIZE, GFP_KERNEL);
+ pwol_pattern = kzalloc(B44_PATTERN_SIZE, GFP_KERNEL);
if (!pwol_pattern) {
printk(KERN_ERR PFX "Memory not available for WOL\n");
return;
}
/* Ipv4 magic packet pattern - pattern 0.*/
- memset(pwol_pattern, 0, B44_PATTERN_SIZE);
memset(pwol_mask, 0, B44_PMASK_SIZE);
plen0 = b44_magic_pattern(bp->dev->dev_addr, pwol_pattern, pwol_mask,
B44_ETHIPV4UDP_HLEN);
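The b44 hunk above is one instance of a conversion repeated across this series (bsd_comp, dmascc, forcedeth): a kmalloc()/kcalloc() followed by memset() collapses into kzalloc()/kcalloc(), which return zeroed memory in one call. A sketch of the equivalence, with a hypothetical struct foo standing in for the real types:

#include <linux/slab.h>

struct foo { int a, b; };	/* hypothetical example type */

static struct foo *alloc_foo(void)
{
	/* before: kmalloc() + memset(), two steps with a window where
	 * the memory is uninitialized */
	/* after: one call, zeroed on success, NULL on failure */
	return kzalloc(sizeof(struct foo), GFP_KERNEL);
}

static struct foo *alloc_foo_array(size_t n)
{
	/* kcalloc() additionally checks n * size for overflow, which is
	 * why forcedeth's ring allocations below use it */
	return kcalloc(n, sizeof(struct foo), GFP_KERNEL);
}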
diff --git a/drivers/net/bfin_mac.c b/drivers/net/bfin_mac.c
new file mode 100644
index 00000000000..9a08d656f1c
--- /dev/null
+++ b/drivers/net/bfin_mac.c
@@ -0,0 +1,1009 @@
+/*
+ * File: drivers/net/bfin_mac.c
+ * Based on:
+ * Maintainer:
+ * Bryan Wu <bryan.wu@analog.com>
+ *
+ * Original author:
+ * Luke Yang <luke.yang@analog.com>
+ *
+ * Created:
+ * Description:
+ *
+ * Modified:
+ * Copyright 2004-2006 Analog Devices Inc.
+ *
+ * Bugs: Enter bugs at http://blackfin.uclinux.org/
+ *
+ * This program is free software ; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation ; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY ; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program ; see the file COPYING.
+ * If not, write to the Free Software Foundation,
+ * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/delay.h>
+#include <linux/timer.h>
+#include <linux/errno.h>
+#include <linux/irq.h>
+#include <linux/io.h>
+#include <linux/ioport.h>
+#include <linux/crc32.h>
+#include <linux/device.h>
+#include <linux/spinlock.h>
+#include <linux/ethtool.h>
+#include <linux/mii.h>
+
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+
+#include <linux/platform_device.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+
+#include <asm/dma.h>
+#include <linux/dma-mapping.h>
+
+#include <asm/blackfin.h>
+#include <asm/cacheflush.h>
+#include <asm/portmux.h>
+
+#include "bfin_mac.h"
+
+#define DRV_NAME "bfin_mac"
+#define DRV_VERSION "1.1"
+#define DRV_AUTHOR "Bryan Wu, Luke Yang"
+#define DRV_DESC "Blackfin BF53[67] on-chip Ethernet MAC driver"
+
+MODULE_AUTHOR(DRV_AUTHOR);
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION(DRV_DESC);
+
+#if defined(CONFIG_BFIN_MAC_USE_L1)
+# define bfin_mac_alloc(dma_handle, size) l1_data_sram_zalloc(size)
+# define bfin_mac_free(dma_handle, ptr) l1_data_sram_free(ptr)
+#else
+# define bfin_mac_alloc(dma_handle, size) \
+ dma_alloc_coherent(NULL, size, dma_handle, GFP_KERNEL)
+# define bfin_mac_free(dma_handle, ptr) \
+ dma_free_coherent(NULL, sizeof(*ptr), ptr, dma_handle)
+#endif
+
+#define PKT_BUF_SZ 1580
+
+#define MAX_TIMEOUT_CNT 500
+
+/* pointers to maintain transmit list */
+static struct net_dma_desc_tx *tx_list_head;
+static struct net_dma_desc_tx *tx_list_tail;
+static struct net_dma_desc_rx *rx_list_head;
+static struct net_dma_desc_rx *rx_list_tail;
+static struct net_dma_desc_rx *current_rx_ptr;
+static struct net_dma_desc_tx *current_tx_ptr;
+static struct net_dma_desc_tx *tx_desc;
+static struct net_dma_desc_rx *rx_desc;
+
+static void desc_list_free(void)
+{
+ struct net_dma_desc_rx *r;
+ struct net_dma_desc_tx *t;
+ int i;
+#if !defined(CONFIG_BFIN_MAC_USE_L1)
+ dma_addr_t dma_handle = 0;
+#endif
+
+ if (tx_desc) {
+ t = tx_list_head;
+ for (i = 0; i < CONFIG_BFIN_TX_DESC_NUM; i++) {
+ if (t) {
+ if (t->skb) {
+ dev_kfree_skb(t->skb);
+ t->skb = NULL;
+ }
+ t = t->next;
+ }
+ }
+ bfin_mac_free(dma_handle, tx_desc);
+ }
+
+ if (rx_desc) {
+ r = rx_list_head;
+ for (i = 0; i < CONFIG_BFIN_RX_DESC_NUM; i++) {
+ if (r) {
+ if (r->skb) {
+ dev_kfree_skb(r->skb);
+ r->skb = NULL;
+ }
+ r = r->next;
+ }
+ }
+ bfin_mac_free(dma_handle, rx_desc);
+ }
+}
+
+static int desc_list_init(void)
+{
+ int i;
+ struct sk_buff *new_skb;
+#if !defined(CONFIG_BFIN_MAC_USE_L1)
+ /*
+ * This dma_handle is useless in Blackfin dma_alloc_coherent().
+ * The real dma handler is the return value of dma_alloc_coherent().
+ */
+ dma_addr_t dma_handle;
+#endif
+
+ tx_desc = bfin_mac_alloc(&dma_handle,
+ sizeof(struct net_dma_desc_tx) *
+ CONFIG_BFIN_TX_DESC_NUM);
+ if (tx_desc == NULL)
+ goto init_error;
+
+ rx_desc = bfin_mac_alloc(&dma_handle,
+ sizeof(struct net_dma_desc_rx) *
+ CONFIG_BFIN_RX_DESC_NUM);
+ if (rx_desc == NULL)
+ goto init_error;
+
+ /* init tx_list */
+ tx_list_head = tx_list_tail = tx_desc;
+
+ for (i = 0; i < CONFIG_BFIN_TX_DESC_NUM; i++) {
+ struct net_dma_desc_tx *t = tx_desc + i;
+ struct dma_descriptor *a = &(t->desc_a);
+ struct dma_descriptor *b = &(t->desc_b);
+
+ /*
+ * disable DMA
+ * read from memory WNR = 0
+ * wordsize is 32 bits
+ * 6 half words is desc size
+ * large desc flow
+ */
+ a->config = WDSIZE_32 | NDSIZE_6 | DMAFLOW_LARGE;
+ a->start_addr = (unsigned long)t->packet;
+ a->x_count = 0;
+ a->next_dma_desc = b;
+
+ /*
+ * enabled DMA
+ * write to memory WNR = 1
+ * wordsize is 32 bits
+ * disable interrupt
+ * 6 half words is desc size
+ * large desc flow
+ */
+ b->config = DMAEN | WNR | WDSIZE_32 | NDSIZE_6 | DMAFLOW_LARGE;
+ b->start_addr = (unsigned long)(&(t->status));
+ b->x_count = 0;
+
+ t->skb = NULL;
+ tx_list_tail->desc_b.next_dma_desc = a;
+ tx_list_tail->next = t;
+ tx_list_tail = t;
+ }
+ tx_list_tail->next = tx_list_head; /* tx_list is a circle */
+ tx_list_tail->desc_b.next_dma_desc = &(tx_list_head->desc_a);
+ current_tx_ptr = tx_list_head;
+
+ /* init rx_list */
+ rx_list_head = rx_list_tail = rx_desc;
+
+ for (i = 0; i < CONFIG_BFIN_RX_DESC_NUM; i++) {
+ struct net_dma_desc_rx *r = rx_desc + i;
+ struct dma_descriptor *a = &(r->desc_a);
+ struct dma_descriptor *b = &(r->desc_b);
+
+ /* allocate a new skb for next time receive */
+ new_skb = dev_alloc_skb(PKT_BUF_SZ + 2);
+ if (!new_skb) {
+ printk(KERN_NOTICE DRV_NAME
+ ": init: low on mem - packet dropped\n");
+ goto init_error;
+ }
+ skb_reserve(new_skb, 2);
+ r->skb = new_skb;
+
+ /*
+ * enabled DMA
+ * write to memory WNR = 1
+ * wordsize is 32 bits
+ * disable interrupt
+ * 6 half words is desc size
+ * large desc flow
+ */
+ a->config = DMAEN | WNR | WDSIZE_32 | NDSIZE_6 | DMAFLOW_LARGE;
+ /* since RXDWA is enabled */
+ a->start_addr = (unsigned long)new_skb->data - 2;
+ a->x_count = 0;
+ a->next_dma_desc = b;
+
+ /*
+ * enabled DMA
+ * write to memory WNR = 1
+ * wordsize is 32 bits
+ * enable interrupt
+ * 6 half words is desc size
+ * large desc flow
+ */
+ b->config = DMAEN | WNR | WDSIZE_32 | DI_EN |
+ NDSIZE_6 | DMAFLOW_LARGE;
+ b->start_addr = (unsigned long)(&(r->status));
+ b->x_count = 0;
+
+ rx_list_tail->desc_b.next_dma_desc = a;
+ rx_list_tail->next = r;
+ rx_list_tail = r;
+ }
+ rx_list_tail->next = rx_list_head; /* rx_list is a circle */
+ rx_list_tail->desc_b.next_dma_desc = &(rx_list_head->desc_a);
+ current_rx_ptr = rx_list_head;
+
+ return 0;
+
+init_error:
+ desc_list_free();
+ printk(KERN_ERR DRV_NAME ": kmalloc failed\n");
+ return -ENOMEM;
+}
+
+
+/*---PHY CONTROL AND CONFIGURATION-----------------------------------------*/
+
+/* Set FER regs to MUX in Ethernet pins */
+static int setup_pin_mux(int action)
+{
+#if defined(CONFIG_BFIN_MAC_RMII)
+ u16 pin_req[] = P_RMII0;
+#else
+ u16 pin_req[] = P_MII0;
+#endif
+
+ if (action) {
+ if (peripheral_request_list(pin_req, DRV_NAME)) {
+ printk(KERN_ERR DRV_NAME
+ ": Requesting Peripherals failed\n");
+ return -EFAULT;
+ }
+ } else
+ peripheral_free_list(pin_req);
+
+ return 0;
+}
+
+/* Wait until the previous MDC/MDIO transaction has completed */
+static void poll_mdc_done(void)
+{
+ int timeout_cnt = MAX_TIMEOUT_CNT;
+
+ /* poll the STABUSY bit */
+ while ((bfin_read_EMAC_STAADD()) & STABUSY) {
+ mdelay(10);
+ if (timeout_cnt-- < 0) {
+ printk(KERN_ERR DRV_NAME
+ ": wait MDC/MDIO transaction to complete timeout\n");
+ break;
+ }
+ }
+}
+
+/* Read an off-chip register in a PHY through the MDC/MDIO port */
+static u16 read_phy_reg(u16 PHYAddr, u16 RegAddr)
+{
+ poll_mdc_done();
+ /* read mode */
+ bfin_write_EMAC_STAADD(SET_PHYAD(PHYAddr) |
+ SET_REGAD(RegAddr) |
+ STABUSY);
+ poll_mdc_done();
+
+ return (u16) bfin_read_EMAC_STADAT();
+}
+
+/* Write an off-chip register in a PHY through the MDC/MDIO port */
+static void raw_write_phy_reg(u16 PHYAddr, u16 RegAddr, u32 Data)
+{
+ bfin_write_EMAC_STADAT(Data);
+
+ /* write mode */
+ bfin_write_EMAC_STAADD(SET_PHYAD(PHYAddr) |
+ SET_REGAD(RegAddr) |
+ STAOP |
+ STABUSY);
+
+ poll_mdc_done();
+}
+
+static void write_phy_reg(u16 PHYAddr, u16 RegAddr, u32 Data)
+{
+ poll_mdc_done();
+ raw_write_phy_reg(PHYAddr, RegAddr, Data);
+}
+
+/* set up the phy */
+static void bf537mac_setphy(struct net_device *dev)
+{
+ u16 phydat;
+ struct bf537mac_local *lp = netdev_priv(dev);
+
+ /* Program PHY registers */
+ pr_debug("start setting up phy\n");
+
+ /* issue a reset */
+ raw_write_phy_reg(lp->PhyAddr, PHYREG_MODECTL, 0x8000);
+
+ /* wait half a second */
+ msleep(500);
+
+ phydat = read_phy_reg(lp->PhyAddr, PHYREG_MODECTL);
+
+ /* advertise flow control supported */
+ phydat = read_phy_reg(lp->PhyAddr, PHYREG_ANAR);
+ phydat |= (1 << 10);
+ write_phy_reg(lp->PhyAddr, PHYREG_ANAR, phydat);
+
+ phydat = 0;
+ if (lp->Negotiate)
+ phydat |= 0x1000; /* enable auto negotiation */
+ else {
+ if (lp->FullDuplex)
+ phydat |= (1 << 8); /* full duplex */
+ else
+ phydat &= (~(1 << 8)); /* half duplex */
+
+ if (!lp->Port10)
+ phydat |= (1 << 13); /* 100 Mbps */
+ else
+ phydat &= (~(1 << 13)); /* 10 Mbps */
+ }
+
+ if (lp->Loopback)
+ phydat |= (1 << 14); /* enable TX->RX loopback */
+
+ write_phy_reg(lp->PhyAddr, PHYREG_MODECTL, phydat);
+ msleep(500);
+
+ phydat = read_phy_reg(lp->PhyAddr, PHYREG_MODECTL);
+ /* check for SMSC PHY */
+ if ((read_phy_reg(lp->PhyAddr, PHYREG_PHYID1) == 0x7) &&
+ ((read_phy_reg(lp->PhyAddr, PHYREG_PHYID2) & 0xfff0) == 0xC0A0)) {
+ /*
+ * we have an SMSC PHY, so request an interrupt
+ * on link down condition
+ */
+
+ /* enable interrupts */
+ write_phy_reg(lp->PhyAddr, 30, 0x0ff);
+ }
+}
+
+/**************************************************************************/
+void setup_system_regs(struct net_device *dev)
+{
+ int phyaddr;
+ unsigned short sysctl, phydat;
+ u32 opmode;
+ struct bf537mac_local *lp = netdev_priv(dev);
+ int count = 0;
+
+ phyaddr = lp->PhyAddr;
+
+ /* Enable PHY output */
+ if (!(bfin_read_VR_CTL() & PHYCLKOE))
+ bfin_write_VR_CTL(bfin_read_VR_CTL() | PHYCLKOE);
+
+ /* MDC = 2.5 MHz */
+ sysctl = SET_MDCDIV(24);
+ /* Odd word alignment for Receive Frame DMA word */
+ /* Configure checksum support and rcve frame word alignment */
+#if defined(BFIN_MAC_CSUM_OFFLOAD)
+ sysctl |= RXDWA | RXCKS;
+#else
+ sysctl |= RXDWA;
+#endif
+ bfin_write_EMAC_SYSCTL(sysctl);
+ /* auto negotiation on */
+ /* full duplex */
+ /* 100 Mbps */
+ phydat = PHY_ANEG_EN | PHY_DUPLEX | PHY_SPD_SET;
+ write_phy_reg(phyaddr, PHYREG_MODECTL, phydat);
+
+ /* test if full duplex supported */
+ do {
+ msleep(100);
+ phydat = read_phy_reg(phyaddr, PHYREG_MODESTAT);
+ if (count > 30) {
+ printk(KERN_NOTICE DRV_NAME ": Link is down\n");
+ printk(KERN_NOTICE DRV_NAME
+ "please check your network connection\n");
+ break;
+ }
+ count++;
+ } while (!(phydat & 0x0004));
+
+ phydat = read_phy_reg(phyaddr, PHYREG_ANLPAR);
+
+ if ((phydat & 0x0100) || (phydat & 0x0040)) {
+ opmode = FDMODE;
+ } else {
+ opmode = 0;
+ printk(KERN_INFO DRV_NAME
+ ": Network is set to half duplex\n");
+ }
+
+#if defined(CONFIG_BFIN_MAC_RMII)
+ opmode |= RMII; /* For Now only 100MBit are supported */
+#endif
+
+ bfin_write_EMAC_OPMODE(opmode);
+
+ bfin_write_EMAC_MMC_CTL(RSTC | CROLL);
+
+ /* Initialize the TX DMA channel registers */
+ bfin_write_DMA2_X_COUNT(0);
+ bfin_write_DMA2_X_MODIFY(4);
+ bfin_write_DMA2_Y_COUNT(0);
+ bfin_write_DMA2_Y_MODIFY(0);
+
+ /* Initialize the RX DMA channel registers */
+ bfin_write_DMA1_X_COUNT(0);
+ bfin_write_DMA1_X_MODIFY(4);
+ bfin_write_DMA1_Y_COUNT(0);
+ bfin_write_DMA1_Y_MODIFY(0);
+}
+
+void setup_mac_addr(u8 * mac_addr)
+{
+ u32 addr_low = le32_to_cpu(*(__le32 *) & mac_addr[0]);
+ u16 addr_hi = le16_to_cpu(*(__le16 *) & mac_addr[4]);
+
+ /* this depends on a little-endian machine */
+ bfin_write_EMAC_ADDRLO(addr_low);
+ bfin_write_EMAC_ADDRHI(addr_hi);
+}
+
+static void adjust_tx_list(void)
+{
+ int timeout_cnt = MAX_TIMEOUT_CNT;
+
+ if (tx_list_head->status.status_word != 0
+ && current_tx_ptr != tx_list_head) {
+ goto adjust_head; /* released something, just return; */
+ }
+
+ /*
+ * if nothing released, check wait condition
+ * current's next can not be the head,
+ * otherwise the dma will not stop as we want
+ */
+ if (current_tx_ptr->next->next == tx_list_head) {
+ while (tx_list_head->status.status_word == 0) {
+ mdelay(10);
+ if (tx_list_head->status.status_word != 0
+ || !(bfin_read_DMA2_IRQ_STATUS() & 0x08)) {
+ goto adjust_head;
+ }
+ if (timeout_cnt-- < 0) {
+ printk(KERN_ERR DRV_NAME
+ ": wait for adjust tx list head timeout\n");
+ break;
+ }
+ }
+ if (tx_list_head->status.status_word != 0) {
+ goto adjust_head;
+ }
+ }
+
+ return;
+
+adjust_head:
+ do {
+ tx_list_head->desc_a.config &= ~DMAEN;
+ tx_list_head->status.status_word = 0;
+ if (tx_list_head->skb) {
+ dev_kfree_skb(tx_list_head->skb);
+ tx_list_head->skb = NULL;
+ } else {
+ printk(KERN_ERR DRV_NAME
+ ": no sk_buff in a transmitted frame!\n");
+ }
+ tx_list_head = tx_list_head->next;
+ } while (tx_list_head->status.status_word != 0
+ && current_tx_ptr != tx_list_head);
+ return;
+
+}
+
+static int bf537mac_hard_start_xmit(struct sk_buff *skb,
+ struct net_device *dev)
+{
+ struct bf537mac_local *lp = netdev_priv(dev);
+ unsigned int data;
+
+ current_tx_ptr->skb = skb;
+
+ /*
+ * Is skb->data always 16-bit aligned?
+ * Do we need to memcpy((char *)(tail->packet + 2), skb->data, len)?
+ */
+ if ((((unsigned int)(skb->data)) & 0x02) == 2) {
+ /* move skb->data to current_tx_ptr payload */
+ data = (unsigned int)(skb->data) - 2;
+ *((unsigned short *)data) = (unsigned short)(skb->len);
+ current_tx_ptr->desc_a.start_addr = (unsigned long)data;
+ /* this is important! */
+ blackfin_dcache_flush_range(data, (data + (skb->len)) + 2);
+
+ } else {
+ *((unsigned short *)(current_tx_ptr->packet)) =
+ (unsigned short)(skb->len);
+ memcpy((char *)(current_tx_ptr->packet + 2), skb->data,
+ (skb->len));
+ current_tx_ptr->desc_a.start_addr =
+ (unsigned long)current_tx_ptr->packet;
+ if (current_tx_ptr->status.status_word != 0)
+ current_tx_ptr->status.status_word = 0;
+ blackfin_dcache_flush_range((unsigned int)current_tx_ptr->
+ packet,
+ (unsigned int)(current_tx_ptr->
+ packet + skb->len) +
+ 2);
+ }
+
+ /* enable this packet's dma */
+ current_tx_ptr->desc_a.config |= DMAEN;
+
+ /* tx dma is running, just return */
+ if (bfin_read_DMA2_IRQ_STATUS() & 0x08)
+ goto out;
+
+ /* tx dma is not running */
+ bfin_write_DMA2_NEXT_DESC_PTR(&(current_tx_ptr->desc_a));
+ /* dma enabled, read from memory, size is 6 */
+ bfin_write_DMA2_CONFIG(current_tx_ptr->desc_a.config);
+ /* Turn on the EMAC tx */
+ bfin_write_EMAC_OPMODE(bfin_read_EMAC_OPMODE() | TE);
+
+out:
+ adjust_tx_list();
+ current_tx_ptr = current_tx_ptr->next;
+ dev->trans_start = jiffies;
+ lp->stats.tx_packets++;
+ lp->stats.tx_bytes += (skb->len);
+ return 0;
+}
+
+static void bf537mac_rx(struct net_device *dev)
+{
+ struct sk_buff *skb, *new_skb;
+ struct bf537mac_local *lp = netdev_priv(dev);
+ unsigned short len;
+
+ /* allocate a new skb for next time receive */
+ skb = current_rx_ptr->skb;
+ new_skb = dev_alloc_skb(PKT_BUF_SZ + 2);
+ if (!new_skb) {
+ printk(KERN_NOTICE DRV_NAME
+ ": rx: low on mem - packet dropped\n");
+ lp->stats.rx_dropped++;
+ goto out;
+ }
+ /* reserve 2 bytes for RXDWA padding */
+ skb_reserve(new_skb, 2);
+ current_rx_ptr->skb = new_skb;
+ current_rx_ptr->desc_a.start_addr = (unsigned long)new_skb->data - 2;
+
+ len = (unsigned short)((current_rx_ptr->status.status_word) & RX_FRLEN);
+ skb_put(skb, len);
+ blackfin_dcache_invalidate_range((unsigned long)skb->head,
+ (unsigned long)skb->tail);
+
+ dev->last_rx = jiffies;
+ skb->dev = dev;
+ skb->protocol = eth_type_trans(skb, dev);
+#if defined(BFIN_MAC_CSUM_OFFLOAD)
+ skb->csum = current_rx_ptr->status.ip_payload_csum;
+ skb->ip_summed = CHECKSUM_PARTIAL;
+#endif
+
+ netif_rx(skb);
+ lp->stats.rx_packets++;
+ lp->stats.rx_bytes += len;
+ current_rx_ptr->status.status_word = 0x00000000;
+ current_rx_ptr = current_rx_ptr->next;
+
+out:
+ return;
+}
+
+/* interrupt routine to handle rx and error signal */
+static irqreturn_t bf537mac_interrupt(int irq, void *dev_id)
+{
+ struct net_device *dev = dev_id;
+ int number = 0;
+
+get_one_packet:
+ if (current_rx_ptr->status.status_word == 0) {
+ /* no more new packet received */
+ if (number == 0) {
+ if (current_rx_ptr->next->status.status_word != 0) {
+ current_rx_ptr = current_rx_ptr->next;
+ goto real_rx;
+ }
+ }
+ bfin_write_DMA1_IRQ_STATUS(bfin_read_DMA1_IRQ_STATUS() |
+ DMA_DONE | DMA_ERR);
+ return IRQ_HANDLED;
+ }
+
+real_rx:
+ bf537mac_rx(dev);
+ number++;
+ goto get_one_packet;
+}
+
+#ifdef CONFIG_NET_POLL_CONTROLLER
+static void bf537mac_poll(struct net_device *dev)
+{
+ disable_irq(IRQ_MAC_RX);
+ bf537mac_interrupt(IRQ_MAC_RX, dev);
+ enable_irq(IRQ_MAC_RX);
+}
+#endif /* CONFIG_NET_POLL_CONTROLLER */
+
+static void bf537mac_reset(void)
+{
+ unsigned int opmode;
+
+ opmode = bfin_read_EMAC_OPMODE();
+ opmode &= (~RE);
+ opmode &= (~TE);
+ /* Turn off the EMAC */
+ bfin_write_EMAC_OPMODE(opmode);
+}
+
+/*
+ * Enable Interrupts, Receive, and Transmit
+ */
+static int bf537mac_enable(struct net_device *dev)
+{
+ u32 opmode;
+
+ pr_debug("%s: %s\n", dev->name, __FUNCTION__);
+
+ /* Set RX DMA */
+ bfin_write_DMA1_NEXT_DESC_PTR(&(rx_list_head->desc_a));
+ bfin_write_DMA1_CONFIG(rx_list_head->desc_a.config);
+
+ /* Wait MII done */
+ poll_mdc_done();
+
+ /* We enable only RX here */
+ /* ASTP : Enable Automatic Pad Stripping
+ PR : Promiscuous Mode for test
+ PSF : Receive frames with total length less than 64 bytes.
+ FDMODE : Full Duplex Mode
+ LB : Internal Loopback for test
+ RE : Receiver Enable */
+ opmode = bfin_read_EMAC_OPMODE();
+ if (opmode & FDMODE)
+ opmode |= PSF;
+ else
+ opmode |= DRO | DC | PSF;
+ opmode |= RE;
+
+#if defined(CONFIG_BFIN_MAC_RMII)
+ opmode |= RMII; /* For Now only 100MBit are supported */
+#ifdef CONFIG_BF_REV_0_2
+ opmode |= TE;
+#endif
+#endif
+ /* Turn on the EMAC rx */
+ bfin_write_EMAC_OPMODE(opmode);
+
+ return 0;
+}
+
+/* Our watchdog timed out. Called by the networking layer */
+static void bf537mac_timeout(struct net_device *dev)
+{
+ pr_debug("%s: %s\n", dev->name, __FUNCTION__);
+
+ bf537mac_reset();
+
+ /* reset tx queue */
+ tx_list_tail = tx_list_head->next;
+
+ bf537mac_enable(dev);
+
+ /* We can accept TX packets again */
+ dev->trans_start = jiffies;
+ netif_wake_queue(dev);
+}
+
+/*
+ * Get the current statistics.
+ * This may be called with the card open or closed.
+ */
+static struct net_device_stats *bf537mac_query_statistics(struct net_device
+ *dev)
+{
+ struct bf537mac_local *lp = netdev_priv(dev);
+
+ pr_debug("%s: %s\n", dev->name, __FUNCTION__);
+
+ return &lp->stats;
+}
+
+/*
+ * This routine will, depending on the values passed to it,
+ * either make it accept multicast packets, go into
+ * promiscuous mode (for TCPDUMP and cousins) or accept
+ * a select set of multicast packets
+ */
+static void bf537mac_set_multicast_list(struct net_device *dev)
+{
+ u32 sysctl;
+
+ if (dev->flags & IFF_PROMISC) {
+ printk(KERN_INFO "%s: set to promisc mode\n", dev->name);
+ sysctl = bfin_read_EMAC_OPMODE();
+ sysctl |= RAF;
+ bfin_write_EMAC_OPMODE(sysctl);
+ } else if (dev->flags & IFF_ALLMULTI || dev->mc_count) {
+ /* accept all multicast */
+ sysctl = bfin_read_EMAC_OPMODE();
+ sysctl |= PAM;
+ bfin_write_EMAC_OPMODE(sysctl);
+ } else {
+ /* clear promisc or multicast mode */
+ sysctl = bfin_read_EMAC_OPMODE();
+ sysctl &= ~(RAF | PAM);
+ bfin_write_EMAC_OPMODE(sysctl);
+ }
+}
+
+/*
+ * this puts the device in an inactive state
+ */
+static void bf537mac_shutdown(struct net_device *dev)
+{
+ /* Turn off the EMAC */
+ bfin_write_EMAC_OPMODE(0x00000000);
+ /* Turn off the EMAC RX DMA */
+ bfin_write_DMA1_CONFIG(0x0000);
+ bfin_write_DMA2_CONFIG(0x0000);
+}
+
+/*
+ * Open and Initialize the interface
+ *
+ * Set up everything, reset the card, etc..
+ */
+static int bf537mac_open(struct net_device *dev)
+{
+ pr_debug("%s: %s\n", dev->name, __FUNCTION__);
+
+ /*
+ * Check that the address is valid. If it's not, refuse
+ * to bring the device up. The user must specify an
+ * address using ifconfig eth0 hw ether xx:xx:xx:xx:xx:xx
+ */
+ if (!is_valid_ether_addr(dev->dev_addr)) {
+ printk(KERN_WARNING DRV_NAME ": no valid ethernet hw addr\n");
+ return -EINVAL;
+ }
+
+ /* initial rx and tx list */
+ desc_list_init();
+
+ bf537mac_setphy(dev);
+ setup_system_regs(dev);
+ bf537mac_reset();
+ bf537mac_enable(dev);
+
+ pr_debug("hardware init finished\n");
+ netif_start_queue(dev);
+ netif_carrier_on(dev);
+
+ return 0;
+}
+
+/*
+ *
+ * this makes the board clean up everything that it can
+ * and not talk to the outside world. Caused by
+ * an 'ifconfig ethX down'
+ */
+static int bf537mac_close(struct net_device *dev)
+{
+ pr_debug("%s: %s\n", dev->name, __FUNCTION__);
+
+ netif_stop_queue(dev);
+ netif_carrier_off(dev);
+
+ /* clear everything */
+ bf537mac_shutdown(dev);
+
+ /* free the rx/tx buffers */
+ desc_list_free();
+
+ return 0;
+}
+
+static int __init bf537mac_probe(struct net_device *dev)
+{
+ struct bf537mac_local *lp = netdev_priv(dev);
+ int retval;
+
+ /* Grab the MAC address in the MAC */
+ *(__le32 *) (&(dev->dev_addr[0])) = cpu_to_le32(bfin_read_EMAC_ADDRLO());
+ *(__le16 *) (&(dev->dev_addr[4])) = cpu_to_le16((u16) bfin_read_EMAC_ADDRHI());
+
+ /* probe mac */
+ /* todo: how to probe? which is the revision register? */
+ bfin_write_EMAC_ADDRLO(0x12345678);
+ if (bfin_read_EMAC_ADDRLO() != 0x12345678) {
+ pr_debug("can't detect bf537 mac!\n");
+ retval = -ENODEV;
+ goto err_out;
+ }
+
+ /* set the GPIO pins to Ethernet mode */
+ retval = setup_pin_mux(1);
+
+ if (retval)
+ return retval;
+
+ /* Is it valid? (Did the bootloader initialize it?) */
+ if (!is_valid_ether_addr(dev->dev_addr)) {
+ /* Grab the MAC from the board somehow - this is done in the
+ arch/blackfin/mach-bf537/boards/eth_mac.c */
+ get_bf537_ether_addr(dev->dev_addr);
+ }
+
+ /* If still not valid, get a random one */
+ if (!is_valid_ether_addr(dev->dev_addr)) {
+ random_ether_addr(dev->dev_addr);
+ }
+
+ setup_mac_addr(dev->dev_addr);
+
+ /* Fill in the fields of the device structure with ethernet values. */
+ ether_setup(dev);
+
+ dev->open = bf537mac_open;
+ dev->stop = bf537mac_close;
+ dev->hard_start_xmit = bf537mac_hard_start_xmit;
+ dev->tx_timeout = bf537mac_timeout;
+ dev->get_stats = bf537mac_query_statistics;
+ dev->set_multicast_list = bf537mac_set_multicast_list;
+#ifdef CONFIG_NET_POLL_CONTROLLER
+ dev->poll_controller = bf537mac_poll;
+#endif
+
+ /* fill in some of the fields */
+ lp->version = 1;
+ lp->PhyAddr = 0x01;
+ lp->CLKIN = 25;
+ lp->FullDuplex = 0;
+ lp->Negotiate = 1;
+ lp->FlowControl = 0;
+ spin_lock_init(&lp->lock);
+
+ /* now, enable interrupts */
+ /* register irq handler */
+ if (request_irq
+ (IRQ_MAC_RX, bf537mac_interrupt, IRQF_DISABLED | IRQF_SHARED,
+ "BFIN537_MAC_RX", dev)) {
+ printk(KERN_WARNING DRV_NAME
+ ": Unable to attach BlackFin MAC RX interrupt\n");
+ return -EBUSY;
+ }
+
+ /* Enable PHY output early */
+ if (!(bfin_read_VR_CTL() & PHYCLKOE))
+ bfin_write_VR_CTL(bfin_read_VR_CTL() | PHYCLKOE);
+
+ retval = register_netdev(dev);
+ if (retval == 0) {
+ /* now, print out the card info, in a short format.. */
+ printk(KERN_INFO "%s: Version %s, %s\n",
+ DRV_NAME, DRV_VERSION, DRV_DESC);
+ }
+
+err_out:
+ return retval;
+}
+
+static int bfin_mac_probe(struct platform_device *pdev)
+{
+ struct net_device *ndev;
+
+ ndev = alloc_etherdev(sizeof(struct bf537mac_local));
+ if (!ndev) {
+ printk(KERN_WARNING DRV_NAME ": could not allocate device\n");
+ return -ENOMEM;
+ }
+
+ SET_MODULE_OWNER(ndev);
+ SET_NETDEV_DEV(ndev, &pdev->dev);
+
+ platform_set_drvdata(pdev, ndev);
+
+ if (bf537mac_probe(ndev) != 0) {
+ platform_set_drvdata(pdev, NULL);
+ free_netdev(ndev);
+ printk(KERN_WARNING DRV_NAME ": not found\n");
+ return -ENODEV;
+ }
+
+ return 0;
+}
+
+static int bfin_mac_remove(struct platform_device *pdev)
+{
+ struct net_device *ndev = platform_get_drvdata(pdev);
+
+ platform_set_drvdata(pdev, NULL);
+
+ unregister_netdev(ndev);
+
+ free_irq(IRQ_MAC_RX, ndev);
+
+ free_netdev(ndev);
+
+ setup_pin_mux(0);
+
+ return 0;
+}
+
+static int bfin_mac_suspend(struct platform_device *pdev, pm_message_t state)
+{
+ return 0;
+}
+
+static int bfin_mac_resume(struct platform_device *pdev)
+{
+ return 0;
+}
+
+static struct platform_driver bfin_mac_driver = {
+ .probe = bfin_mac_probe,
+ .remove = bfin_mac_remove,
+ .resume = bfin_mac_resume,
+ .suspend = bfin_mac_suspend,
+ .driver = {
+ .name = DRV_NAME,
+ },
+};
+
+static int __init bfin_mac_init(void)
+{
+ return platform_driver_register(&bfin_mac_driver);
+}
+
+module_init(bfin_mac_init);
+
+static void __exit bfin_mac_cleanup(void)
+{
+ platform_driver_unregister(&bfin_mac_driver);
+}
+
+module_exit(bfin_mac_cleanup);
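Buried in the bulk of the file above is the descriptor scheme the whole driver leans on: each packet slot owns a desc_a (payload DMA) hard-wired to a desc_b (status writeback), and desc_list_init() closes both rings into circles. A small sketch of that invariant, assuming the structures from bfin_mac.h (illustrative, not part of the patch):

#include "bfin_mac.h"

/* After desc_list_init(), walking num slots from head must return to
 * head; every slot's desc_a chains into its own desc_b, whose
 * next_dma_desc points at the next slot's desc_a. */
static int tx_ring_is_circular(struct net_dma_desc_tx *head, int num)
{
	struct net_dma_desc_tx *t = head;
	int i;

	for (i = 0; i < num; i++) {
		if (t->desc_a.next_dma_desc != &t->desc_b)
			return 0;
		if (t->desc_b.next_dma_desc != &t->next->desc_a)
			return 0;
		t = t->next;
	}
	return t == head;
}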
diff --git a/drivers/net/bfin_mac.h b/drivers/net/bfin_mac.h
new file mode 100644
index 00000000000..af87189b85f
--- /dev/null
+++ b/drivers/net/bfin_mac.h
@@ -0,0 +1,132 @@
+/*
+ * File: drivers/net/bfin_mac.h
+ * Based on:
+ * Maintainer:
+ * Bryan Wu <bryan.wu@analog.com>
+ *
+ * Original author:
+ * Luke Yang <luke.yang@analog.com>
+ *
+ * Created:
+ * Description:
+ *
+ * Modified:
+ * Copyright 2004-2006 Analog Devices Inc.
+ *
+ * Bugs: Enter bugs at http://blackfin.uclinux.org/
+ *
+ * This program is free software ; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation ; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY ; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program ; see the file COPYING.
+ * If not, write to the Free Software Foundation,
+ * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+
+/*
+ * PHY REGISTER NAMES
+ */
+#define PHYREG_MODECTL 0x0000
+#define PHYREG_MODESTAT 0x0001
+#define PHYREG_PHYID1 0x0002
+#define PHYREG_PHYID2 0x0003
+#define PHYREG_ANAR 0x0004
+#define PHYREG_ANLPAR 0x0005
+#define PHYREG_ANER 0x0006
+#define PHYREG_NSR 0x0010
+#define PHYREG_LBREMR 0x0011
+#define PHYREG_REC 0x0012
+#define PHYREG_10CFG 0x0013
+#define PHYREG_PHY1_1 0x0014
+#define PHYREG_PHY1_2 0x0015
+#define PHYREG_PHY2 0x0016
+#define PHYREG_TW_1 0x0017
+#define PHYREG_TW_2 0x0018
+#define PHYREG_TEST 0x0019
+
+#define PHY_RESET 0x8000
+#define PHY_ANEG_EN 0x1000
+#define PHY_DUPLEX 0x0100
+#define PHY_SPD_SET 0x2000
+
+#define BFIN_MAC_CSUM_OFFLOAD
+
+struct dma_descriptor {
+ struct dma_descriptor *next_dma_desc;
+ unsigned long start_addr;
+ unsigned short config;
+ unsigned short x_count;
+};
+
+struct status_area_rx {
+#if defined(BFIN_MAC_CSUM_OFFLOAD)
+ unsigned short ip_hdr_csum; /* ip header checksum */
+ /* ip payload(udp or tcp or others) checksum */
+ unsigned short ip_payload_csum;
+#endif
+ unsigned long status_word; /* the frame status word */
+};
+
+struct status_area_tx {
+ unsigned long status_word; /* the frame status word */
+};
+
+/* use two descriptors for a packet */
+struct net_dma_desc_rx {
+ struct net_dma_desc_rx *next;
+ struct sk_buff *skb;
+ struct dma_descriptor desc_a;
+ struct dma_descriptor desc_b;
+ struct status_area_rx status;
+};
+
+/* use two descriptors for a packet */
+struct net_dma_desc_tx {
+ struct net_dma_desc_tx *next;
+ struct sk_buff *skb;
+ struct dma_descriptor desc_a;
+ struct dma_descriptor desc_b;
+ unsigned char packet[1560];
+ struct status_area_tx status;
+};
+
+struct bf537mac_local {
+ /*
+ * these are things that the kernel wants me to keep, so users
+ * can find out semi-useless statistics of how well the card is
+ * performing
+ */
+ struct net_device_stats stats;
+
+ int version;
+
+ int FlowEnabled; /* record if data flow is active */
+ int EtherIntIVG; /* IVG for the ethernet interrupt */
+ int RXIVG; /* IVG for the RX completion */
+ int TXIVG; /* IVG for the TX completion */
+ int PhyAddr; /* PHY address */
+ int OpMode; /* set these bits in the OPMODE regs */
+ int Port10; /* set port speed to 10 Mbit/s */
+ int GenChksums; /* IP checksums to be calculated */
+ int NoRcveLnth; /* don't insert receive length at start of buffer */
+ int StripPads; /* remove trailing pad bytes */
+ int FullDuplex; /* set full duplex mode */
+ int Negotiate; /* enable auto negotiation */
+ int Loopback; /* loopback at the PHY */
+ int Cache; /* Buffers may be cached */
+ int FlowControl; /* flow control active */
+ int CLKIN; /* clock in value in MHZ */
+ unsigned short IntMask; /* interrupt mask */
+ unsigned char Mac[6]; /* MAC address of the board */
+ spinlock_t lock;
+};
+
+extern void get_bf537_ether_addr(char *addr);
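One convention the rx structures above depend on is easy to miss: with RXDWA set in EMAC_SYSCTL, the MAC writes each frame two bytes into the DMA target, so desc_list_init() and bf537mac_rx() reserve 2 bytes in the skb and back start_addr up by 2. The usual motive for such a 2-byte offset is leaving the IP header 4-byte aligned behind the 14-byte Ethernet header, though the patch does not say so explicitly. A minimal sketch of the buffer setup:

#include <linux/skbuff.h>

/* Illustrative rx-buffer setup mirroring bfin_mac.c; PKT_BUF_SZ is the
 * value defined there. */
#define PKT_BUF_SZ 1580

static struct sk_buff *rx_buf_setup(unsigned long *dma_target)
{
	struct sk_buff *skb = dev_alloc_skb(PKT_BUF_SZ + 2);

	if (!skb)
		return NULL;
	skb_reserve(skb, 2);				/* room for the RXDWA pad */
	*dma_target = (unsigned long)skb->data - 2;	/* MAC writes 2 bytes early */
	return skb;
}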
diff --git a/drivers/net/bnx2.c b/drivers/net/bnx2.c
index d23861c8658..a729da061bb 100644
--- a/drivers/net/bnx2.c
+++ b/drivers/net/bnx2.c
@@ -54,8 +54,8 @@
#define DRV_MODULE_NAME "bnx2"
#define PFX DRV_MODULE_NAME ": "
-#define DRV_MODULE_VERSION "1.6.2"
-#define DRV_MODULE_RELDATE "July 6, 2007"
+#define DRV_MODULE_VERSION "1.6.3"
+#define DRV_MODULE_RELDATE "July 16, 2007"
#define RUN_AT(x) (jiffies + (x))
@@ -126,91 +126,102 @@ static struct pci_device_id bnx2_pci_tbl[] = {
static struct flash_spec flash_table[] =
{
+#define BUFFERED_FLAGS (BNX2_NV_BUFFERED | BNX2_NV_TRANSLATE)
+#define NONBUFFERED_FLAGS (BNX2_NV_WREN)
/* Slow EEPROM */
{0x00000000, 0x40830380, 0x009f0081, 0xa184a053, 0xaf000400,
- 1, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
+ BUFFERED_FLAGS, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
"EEPROM - slow"},
/* Expansion entry 0001 */
{0x08000002, 0x4b808201, 0x00050081, 0x03840253, 0xaf020406,
- 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
+ NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
"Entry 0001"},
/* Saifun SA25F010 (non-buffered flash) */
/* strap, cfg1, & write1 need updates */
{0x04000001, 0x47808201, 0x00050081, 0x03840253, 0xaf020406,
- 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
+ NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*2,
"Non-buffered flash (128kB)"},
/* Saifun SA25F020 (non-buffered flash) */
/* strap, cfg1, & write1 need updates */
{0x0c000003, 0x4f808201, 0x00050081, 0x03840253, 0xaf020406,
- 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
+ NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*4,
"Non-buffered flash (256kB)"},
/* Expansion entry 0100 */
{0x11000000, 0x53808201, 0x00050081, 0x03840253, 0xaf020406,
- 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
+ NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
"Entry 0100"},
/* Entry 0101: ST M45PE10 (non-buffered flash, TetonII B0) */
{0x19000002, 0x5b808201, 0x000500db, 0x03840253, 0xaf020406,
- 0, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
+ NONBUFFERED_FLAGS, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*2,
"Entry 0101: ST M45PE10 (128kB non-bufferred)"},
/* Entry 0110: ST M45PE20 (non-buffered flash)*/
{0x15000001, 0x57808201, 0x000500db, 0x03840253, 0xaf020406,
- 0, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
+ NONBUFFERED_FLAGS, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*4,
"Entry 0110: ST M45PE20 (256kB non-bufferred)"},
/* Saifun SA25F005 (non-buffered flash) */
/* strap, cfg1, & write1 need updates */
{0x1d000003, 0x5f808201, 0x00050081, 0x03840253, 0xaf020406,
- 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
+ NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE,
"Non-buffered flash (64kB)"},
/* Fast EEPROM */
{0x22000000, 0x62808380, 0x009f0081, 0xa184a053, 0xaf000400,
- 1, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
+ BUFFERED_FLAGS, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
"EEPROM - fast"},
/* Expansion entry 1001 */
{0x2a000002, 0x6b808201, 0x00050081, 0x03840253, 0xaf020406,
- 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
+ NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
"Entry 1001"},
/* Expansion entry 1010 */
{0x26000001, 0x67808201, 0x00050081, 0x03840253, 0xaf020406,
- 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
+ NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
"Entry 1010"},
/* ATMEL AT45DB011B (buffered flash) */
{0x2e000003, 0x6e808273, 0x00570081, 0x68848353, 0xaf000400,
- 1, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
+ BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE,
"Buffered flash (128kB)"},
/* Expansion entry 1100 */
{0x33000000, 0x73808201, 0x00050081, 0x03840253, 0xaf020406,
- 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
+ NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
"Entry 1100"},
/* Expansion entry 1101 */
{0x3b000002, 0x7b808201, 0x00050081, 0x03840253, 0xaf020406,
- 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
+ NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
"Entry 1101"},
/* Ateml Expansion entry 1110 */
{0x37000001, 0x76808273, 0x00570081, 0x68848353, 0xaf000400,
- 1, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
+ BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
BUFFERED_FLASH_BYTE_ADDR_MASK, 0,
"Entry 1110 (Atmel)"},
/* ATMEL AT45DB021B (buffered flash) */
{0x3f000003, 0x7e808273, 0x00570081, 0x68848353, 0xaf000400,
- 1, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
+ BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE*2,
"Buffered flash (256kB)"},
};
+static struct flash_spec flash_5709 = {
+ .flags = BNX2_NV_BUFFERED,
+ .page_bits = BCM5709_FLASH_PAGE_BITS,
+ .page_size = BCM5709_FLASH_PAGE_SIZE,
+ .addr_mask = BCM5709_FLASH_BYTE_ADDR_MASK,
+ .total_size = BUFFERED_FLASH_TOTAL_SIZE*2,
+ .name = "5709 Buffered flash (256kB)",
+};
+
MODULE_DEVICE_TABLE(pci, bnx2_pci_tbl);
static inline u32 bnx2_tx_avail(struct bnx2 *bp)
@@ -3289,7 +3300,7 @@ bnx2_enable_nvram_write(struct bnx2 *bp)
val = REG_RD(bp, BNX2_MISC_CFG);
REG_WR(bp, BNX2_MISC_CFG, val | BNX2_MISC_CFG_NVM_WR_EN_PCI);
- if (!bp->flash_info->buffered) {
+ if (bp->flash_info->flags & BNX2_NV_WREN) {
int j;
REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
@@ -3349,7 +3360,7 @@ bnx2_nvram_erase_page(struct bnx2 *bp, u32 offset)
u32 cmd;
int j;
- if (bp->flash_info->buffered)
+ if (bp->flash_info->flags & BNX2_NV_BUFFERED)
/* Buffered flash, no erase needed */
return 0;
@@ -3392,8 +3403,8 @@ bnx2_nvram_read_dword(struct bnx2 *bp, u32 offset, u8 *ret_val, u32 cmd_flags)
/* Build the command word. */
cmd = BNX2_NVM_COMMAND_DOIT | cmd_flags;
- /* Calculate an offset of a buffered flash. */
- if (bp->flash_info->buffered) {
+ /* Calculate an offset of a buffered flash, not needed for 5709. */
+ if (bp->flash_info->flags & BNX2_NV_TRANSLATE) {
offset = ((offset / bp->flash_info->page_size) <<
bp->flash_info->page_bits) +
(offset % bp->flash_info->page_size);
@@ -3439,8 +3450,8 @@ bnx2_nvram_write_dword(struct bnx2 *bp, u32 offset, u8 *val, u32 cmd_flags)
/* Build the command word. */
cmd = BNX2_NVM_COMMAND_DOIT | BNX2_NVM_COMMAND_WR | cmd_flags;
- /* Calculate an offset of a buffered flash. */
- if (bp->flash_info->buffered) {
+ /* Calculate an offset of a buffered flash, not needed for 5709. */
+ if (bp->flash_info->flags & BNX2_NV_TRANSLATE) {
offset = ((offset / bp->flash_info->page_size) <<
bp->flash_info->page_bits) +
(offset % bp->flash_info->page_size);
@@ -3478,15 +3489,19 @@ static int
bnx2_init_nvram(struct bnx2 *bp)
{
u32 val;
- int j, entry_count, rc;
+ int j, entry_count, rc = 0;
struct flash_spec *flash;
+ if (CHIP_NUM(bp) == CHIP_NUM_5709) {
+ bp->flash_info = &flash_5709;
+ goto get_flash_size;
+ }
+
/* Determine the selected interface. */
val = REG_RD(bp, BNX2_NVM_CFG1);
entry_count = sizeof(flash_table) / sizeof(struct flash_spec);
- rc = 0;
if (val & 0x40000000) {
/* Flash interface has been reconfigured */
@@ -3542,6 +3557,7 @@ bnx2_init_nvram(struct bnx2 *bp)
return -ENODEV;
}
+get_flash_size:
val = REG_RD_IND(bp, bp->shmem_base + BNX2_SHARED_HW_CFG_CONFIG2);
val &= BNX2_SHARED_HW_CFG2_NVM_SIZE_MASK;
if (val)
@@ -3706,7 +3722,7 @@ bnx2_nvram_write(struct bnx2 *bp, u32 offset, u8 *data_buf,
buf = align_buf;
}
- if (bp->flash_info->buffered == 0) {
+ if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
flash_buffer = kmalloc(264, GFP_KERNEL);
if (flash_buffer == NULL) {
rc = -ENOMEM;
@@ -3739,7 +3755,7 @@ bnx2_nvram_write(struct bnx2 *bp, u32 offset, u8 *data_buf,
bnx2_enable_nvram_access(bp);
cmd_flags = BNX2_NVM_COMMAND_FIRST;
- if (bp->flash_info->buffered == 0) {
+ if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
int j;
/* Read the whole page into the buffer
@@ -3767,7 +3783,7 @@ bnx2_nvram_write(struct bnx2 *bp, u32 offset, u8 *data_buf,
/* Loop to write back the buffer data from page_start to
* data_start */
i = 0;
- if (bp->flash_info->buffered == 0) {
+ if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
/* Erase the page */
if ((rc = bnx2_nvram_erase_page(bp, page_start)) != 0)
goto nvram_write_end;
@@ -3791,7 +3807,7 @@ bnx2_nvram_write(struct bnx2 *bp, u32 offset, u8 *data_buf,
/* Loop to write the new data from data_start to data_end */
for (addr = data_start; addr < data_end; addr += 4, i += 4) {
if ((addr == page_end - 4) ||
- ((bp->flash_info->buffered) &&
+ ((bp->flash_info->flags & BNX2_NV_BUFFERED) &&
(addr == data_end - 4))) {
cmd_flags |= BNX2_NVM_COMMAND_LAST;
@@ -3808,7 +3824,7 @@ bnx2_nvram_write(struct bnx2 *bp, u32 offset, u8 *data_buf,
/* Loop to write back the buffer data from data_end
* to page_end */
- if (bp->flash_info->buffered == 0) {
+ if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
for (addr = data_end; addr < page_end;
addr += 4, i += 4) {
@@ -4107,7 +4123,7 @@ bnx2_init_chip(struct bnx2 *bp)
if (CHIP_NUM(bp) == CHIP_NUM_5708)
REG_WR(bp, BNX2_HC_STATS_TICKS, 0);
else
- REG_WR(bp, BNX2_HC_STATS_TICKS, bp->stats_ticks & 0xffff00);
+ REG_WR(bp, BNX2_HC_STATS_TICKS, bp->stats_ticks);
REG_WR(bp, BNX2_HC_STAT_COLLECT_TICKS, 0xbb8); /* 3ms */
if (CHIP_ID(bp) == CHIP_ID_5706_A1)
@@ -4127,10 +4143,6 @@ bnx2_init_chip(struct bnx2 *bp)
REG_WR(bp, BNX2_HC_ATTN_BITS_ENABLE, STATUS_ATTN_EVENTS);
- if (REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_FEATURE) &
- BNX2_PORT_FEATURE_ASF_ENABLED)
- bp->flags |= ASF_ENABLE_FLAG;
-
/* Initialize the receive filter. */
bnx2_set_rx_mode(bp->dev);
@@ -5786,8 +5798,9 @@ bnx2_set_coalesce(struct net_device *dev, struct ethtool_coalesce *coal)
if (bp->stats_ticks != 0 && bp->stats_ticks != USEC_PER_SEC)
bp->stats_ticks = USEC_PER_SEC;
}
- if (bp->stats_ticks > 0xffff00) bp->stats_ticks = 0xffff00;
- bp->stats_ticks &= 0xffff00;
+ if (bp->stats_ticks > BNX2_HC_STATS_TICKS_HC_STAT_TICKS)
+ bp->stats_ticks = BNX2_HC_STATS_TICKS_HC_STAT_TICKS;
+ bp->stats_ticks &= BNX2_HC_STATS_TICKS_HC_STAT_TICKS;
if (netif_running(bp->dev)) {
bnx2_netif_stop(bp);
@@ -6629,6 +6642,18 @@ bnx2_init_board(struct pci_dev *pdev, struct net_device *dev)
if (i != 2)
bp->fw_version[j++] = '.';
}
+ if (REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_FEATURE) &
+ BNX2_PORT_FEATURE_ASF_ENABLED) {
+ bp->flags |= ASF_ENABLE_FLAG;
+
+ for (i = 0; i < 30; i++) {
+ reg = REG_RD_IND(bp, bp->shmem_base +
+ BNX2_BC_STATE_CONDITION);
+ if (reg & BNX2_CONDITION_MFW_RUN_MASK)
+ break;
+ msleep(10);
+ }
+ }
reg = REG_RD_IND(bp, bp->shmem_base + BNX2_BC_STATE_CONDITION);
reg &= BNX2_CONDITION_MFW_RUN_MASK;
if (reg != BNX2_CONDITION_MFW_RUN_UNKNOWN &&
@@ -6672,7 +6697,7 @@ bnx2_init_board(struct pci_dev *pdev, struct net_device *dev)
bp->rx_ticks_int = 18;
bp->rx_ticks = 18;
- bp->stats_ticks = 1000000 & 0xffff00;
+ bp->stats_ticks = USEC_PER_SEC & BNX2_HC_STATS_TICKS_HC_STAT_TICKS;
bp->timer_interval = HZ;
bp->current_interval = HZ;
diff --git a/drivers/net/bnx2.h b/drivers/net/bnx2.h
index d8cd1afeb23..102adfe1e92 100644
--- a/drivers/net/bnx2.h
+++ b/drivers/net/bnx2.h
@@ -6433,6 +6433,11 @@ struct sw_bd {
#define ST_MICRO_FLASH_PAGE_SIZE 256
#define ST_MICRO_FLASH_BASE_TOTAL_SIZE 65536
+#define BCM5709_FLASH_PAGE_BITS 8
+#define BCM5709_FLASH_PHY_PAGE_SIZE (1 << BCM5709_FLASH_PAGE_BITS)
+#define BCM5709_FLASH_BYTE_ADDR_MASK (BCM5709_FLASH_PHY_PAGE_SIZE-1)
+#define BCM5709_FLASH_PAGE_SIZE 256
+
#define NVRAM_TIMEOUT_COUNT 30000
@@ -6449,7 +6454,10 @@ struct flash_spec {
u32 config2;
u32 config3;
u32 write1;
- u32 buffered;
+ u32 flags;
+#define BNX2_NV_BUFFERED 0x00000001
+#define BNX2_NV_TRANSLATE 0x00000002
+#define BNX2_NV_WREN 0x00000004
u32 page_bits;
u32 page_size;
u32 addr_mask;
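The flags split above matters because of how bnx2 addresses page-organized parts: bnx2_nvram_read_dword()/write_dword() translate a linear byte offset into (page << page_bits) | byte-in-page, but only when BNX2_NV_TRANSLATE is set. For the 5709's 256-byte pages with 8 page bits that translation is the identity, which is why flash_5709 carries BNX2_NV_BUFFERED alone. A worked sketch (the 264-byte/9-bit Atmel geometry is assumed from the AT45DB011B datasheet, not shown in this diff):

#include <linux/types.h>

/* Linear-to-device offset mapping used by bnx2_nvram_{read,write}_dword(). */
static u32 nv_translate(u32 offset, u32 page_size, u32 page_bits)
{
	return ((offset / page_size) << page_bits) + (offset % page_size);
}

/*
 * Atmel AT45DB011B, BNX2_NV_TRANSLATE set (assumed geometry):
 *   nv_translate(264, 264, 9) == (1 << 9) + 0 == 512, not 264
 * BCM5709, no BNX2_NV_TRANSLATE needed:
 *   nv_translate(256, 256, 8) == (1 << 8) + 0 == 256 == offset
 */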
diff --git a/drivers/net/bsd_comp.c b/drivers/net/bsd_comp.c
index 7845eaf6f29..202d4a4ef75 100644
--- a/drivers/net/bsd_comp.c
+++ b/drivers/net/bsd_comp.c
@@ -395,14 +395,13 @@ static void *bsd_alloc (unsigned char *options, int opt_len, int decomp)
* Allocate the main control structure for this instance.
*/
maxmaxcode = MAXCODE(bits);
- db = kmalloc(sizeof (struct bsd_db),
+ db = kzalloc(sizeof (struct bsd_db),
GFP_KERNEL);
if (!db)
{
return NULL;
}
- memset (db, 0, sizeof(struct bsd_db));
/*
* Allocate space for the dictionary. This may be more than one page in
* length.
diff --git a/drivers/net/ehea/ehea.h b/drivers/net/ehea/ehea.h
index 6628fa622e2..489c8b260dd 100644
--- a/drivers/net/ehea/ehea.h
+++ b/drivers/net/ehea/ehea.h
@@ -39,7 +39,7 @@
#include <asm/io.h>
#define DRV_NAME "ehea"
-#define DRV_VERSION "EHEA_0070"
+#define DRV_VERSION "EHEA_0071"
/* eHEA capability flags */
#define DLPAR_PORT_ADD_REM 1
diff --git a/drivers/net/ehea/ehea_main.c b/drivers/net/ehea/ehea_main.c
index 1d1571cf322..4c70a9301c1 100644
--- a/drivers/net/ehea/ehea_main.c
+++ b/drivers/net/ehea/ehea_main.c
@@ -466,6 +466,8 @@ static struct ehea_cqe *ehea_proc_rwqes(struct net_device *dev,
cqe->vlan_tag);
else
netif_receive_skb(skb);
+
+ dev->last_rx = jiffies;
} else {
pr->p_stats.poll_receive_errors++;
port_reset = ehea_treat_poll_error(pr, rq, cqe,
@@ -1433,7 +1435,8 @@ static int ehea_broadcast_reg_helper(struct ehea_port *port, u32 hcallid)
port->logical_port_id,
reg_type, port->mac_addr, 0, hcallid);
if (hret != H_SUCCESS) {
- ehea_error("reg_dereg_bcmc failed (tagged)");
+ ehea_error("%sregistering bc address failed (tagged)",
+ hcallid == H_REG_BCMC ? "" : "de");
ret = -EIO;
goto out_herr;
}
@@ -1444,7 +1447,8 @@ static int ehea_broadcast_reg_helper(struct ehea_port *port, u32 hcallid)
port->logical_port_id,
reg_type, port->mac_addr, 0, hcallid);
if (hret != H_SUCCESS) {
- ehea_error("reg_dereg_bcmc failed (vlan)");
+ ehea_error("%sregistering bc address failed (vlan)",
+ hcallid == H_REG_BCMC ? "" : "de");
ret = -EIO;
}
out_herr:
@@ -2170,7 +2174,6 @@ static int ehea_up(struct net_device *dev)
{
int ret, i;
struct ehea_port *port = netdev_priv(dev);
- u64 mac_addr = 0;
if (port->state == EHEA_PORT_UP)
return 0;
@@ -2189,18 +2192,10 @@ static int ehea_up(struct net_device *dev)
goto out_clean_pr;
}
- ret = ehea_broadcast_reg_helper(port, H_REG_BCMC);
- if (ret) {
- ret = -EIO;
- ehea_error("out_clean_pr");
- goto out_clean_pr;
- }
- mac_addr = (*(u64*)dev->dev_addr) >> 16;
-
ret = ehea_reg_interrupts(dev);
if (ret) {
- ehea_error("out_dereg_bc");
- goto out_dereg_bc;
+ ehea_error("reg_interrupts failed. ret:%d", ret);
+ goto out_clean_pr;
}
for(i = 0; i < port->num_def_qps + port->num_add_tx_qps; i++) {
@@ -2226,9 +2221,6 @@ static int ehea_up(struct net_device *dev)
out_free_irqs:
ehea_free_interrupts(dev);
-out_dereg_bc:
- ehea_broadcast_reg_helper(port, H_DEREG_BCMC);
-
out_clean_pr:
ehea_clean_all_portres(port);
out:
@@ -2273,7 +2265,6 @@ static int ehea_down(struct net_device *dev)
&port->port_res[i].d_netdev->state))
msleep(1);
- ehea_broadcast_reg_helper(port, H_DEREG_BCMC);
port->state = EHEA_PORT_DOWN;
ret = ehea_clean_all_portres(port);
@@ -2655,12 +2646,18 @@ struct ehea_port *ehea_setup_single_port(struct ehea_adapter *adapter,
INIT_WORK(&port->reset_task, ehea_reset_port);
+ ret = ehea_broadcast_reg_helper(port, H_REG_BCMC);
+ if (ret) {
+ ret = -EIO;
+ goto out_unreg_port;
+ }
+
ehea_set_ethtool_ops(dev);
ret = register_netdev(dev);
if (ret) {
ehea_error("register_netdev failed. ret=%d", ret);
- goto out_unreg_port;
+ goto out_dereg_bc;
}
ret = ehea_get_jumboframe_status(port, &jumbo);
@@ -2675,6 +2672,9 @@ struct ehea_port *ehea_setup_single_port(struct ehea_adapter *adapter,
return port;
+out_dereg_bc:
+ ehea_broadcast_reg_helper(port, H_DEREG_BCMC);
+
out_unreg_port:
ehea_unregister_port(port);
@@ -2694,6 +2694,7 @@ static void ehea_shutdown_single_port(struct ehea_port *port)
{
unregister_netdev(port->netdev);
ehea_unregister_port(port);
+ ehea_broadcast_reg_helper(port, H_DEREG_BCMC);
kfree(port->mc_list);
free_netdev(port->netdev);
port->adapter->active_ports--;
diff --git a/drivers/net/forcedeth.c b/drivers/net/forcedeth.c
index 136827f8dc2..6d1d50a1978 100644
--- a/drivers/net/forcedeth.c
+++ b/drivers/net/forcedeth.c
@@ -5137,12 +5137,10 @@ static int __devinit nv_probe(struct pci_dev *pci_dev, const struct pci_device_i
goto out_unmap;
np->tx_ring.ex = &np->rx_ring.ex[np->rx_ring_size];
}
- np->rx_skb = kmalloc(sizeof(struct nv_skb_map) * np->rx_ring_size, GFP_KERNEL);
- np->tx_skb = kmalloc(sizeof(struct nv_skb_map) * np->tx_ring_size, GFP_KERNEL);
+ np->rx_skb = kcalloc(np->rx_ring_size, sizeof(struct nv_skb_map), GFP_KERNEL);
+ np->tx_skb = kcalloc(np->tx_ring_size, sizeof(struct nv_skb_map), GFP_KERNEL);
if (!np->rx_skb || !np->tx_skb)
goto out_freering;
- memset(np->rx_skb, 0, sizeof(struct nv_skb_map) * np->rx_ring_size);
- memset(np->tx_skb, 0, sizeof(struct nv_skb_map) * np->tx_ring_size);
dev->open = nv_open;
dev->stop = nv_close;
diff --git a/drivers/net/gianfar.c b/drivers/net/gianfar.c
index d7a1a58de76..f92690555dd 100644
--- a/drivers/net/gianfar.c
+++ b/drivers/net/gianfar.c
@@ -420,8 +420,18 @@ static phy_interface_t gfar_get_interface(struct net_device *dev)
if (ecntrl & ECNTRL_REDUCED_MODE) {
if (ecntrl & ECNTRL_REDUCED_MII_MODE)
return PHY_INTERFACE_MODE_RMII;
- else
+ else {
+ phy_interface_t interface = priv->einfo->interface;
+
+ /*
+ * This isn't autodetected right now, so it must
+ * be set by the device tree or platform code.
+ */
+ if (interface == PHY_INTERFACE_MODE_RGMII_ID)
+ return PHY_INTERFACE_MODE_RGMII_ID;
+
return PHY_INTERFACE_MODE_RGMII;
+ }
}
if (priv->einfo->device_flags & FSL_GIANFAR_DEV_HAS_GIGABIT)
diff --git a/drivers/net/hamradio/baycom_epp.c b/drivers/net/hamradio/baycom_epp.c
index 84aa2117c0e..355c6cf3d11 100644
--- a/drivers/net/hamradio/baycom_epp.c
+++ b/drivers/net/hamradio/baycom_epp.c
@@ -320,7 +320,7 @@ static int eppconfig(struct baycom_state *bc)
sprintf(portarg, "%ld", bc->pdev->port->base);
printk(KERN_DEBUG "%s: %s -s -p %s -m %s\n", bc_drvname, eppconfig_path, portarg, modearg);
- return call_usermodehelper(eppconfig_path, argv, envp, 1);
+ return call_usermodehelper(eppconfig_path, argv, envp, UMH_WAIT_PROC);
}
/* ---------------------------------------------------------------------- */
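The one-liner above tracks a kmod API cleanup: the old magic constant 1 for call_usermodehelper()'s wait argument became the UMH_WAIT_PROC enumerator, meaning "block until the helper process exits and return its exit status" (as opposed to UMH_WAIT_EXEC or UMH_NO_WAIT). A minimal sketch of the call shape, with path/argv/envp standing in for the eppconfig ones the driver builds:

#include <linux/kmod.h>

/* Hypothetical wrapper; baycom_epp.c uses this pattern directly. */
static int run_helper(char *path, char **argv, char **envp)
{
	/* returns the helper's exit status once the process has exited */
	return call_usermodehelper(path, argv, envp, UMH_WAIT_PROC);
}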
diff --git a/drivers/net/hamradio/dmascc.c b/drivers/net/hamradio/dmascc.c
index 3be8c504759..205f0967249 100644
--- a/drivers/net/hamradio/dmascc.c
+++ b/drivers/net/hamradio/dmascc.c
@@ -453,8 +453,8 @@ static int __init setup_adapter(int card_base, int type, int n)
int scc_base = card_base + hw[type].scc_offset;
char *chipnames[] = CHIPNAMES;
- /* Allocate memory */
- info = kmalloc(sizeof(struct scc_info), GFP_KERNEL | GFP_DMA);
+ /* Initialize what is necessary for write_scc and write_scc_data */
+ info = kzalloc(sizeof(struct scc_info), GFP_KERNEL | GFP_DMA);
if (!info) {
printk(KERN_ERR "dmascc: "
"could not allocate memory for %s at %#3x\n",
@@ -462,8 +462,6 @@ static int __init setup_adapter(int card_base, int type, int n)
goto out;
}
- /* Initialize what is necessary for write_scc and write_scc_data */
- memset(info, 0, sizeof(struct scc_info));
info->dev[0] = alloc_netdev(0, "", dev_setup);
if (!info->dev[0]) {
diff --git a/drivers/net/irda/Kconfig b/drivers/net/irda/Kconfig
index 829da9a1d11..2098d0af8ff 100644
--- a/drivers/net/irda/Kconfig
+++ b/drivers/net/irda/Kconfig
@@ -155,6 +155,15 @@ config KINGSUN_DONGLE
To compile it as a module, choose M here: the module will be called
kingsun-sir.
+config EP7211_DONGLE
+ tristate "EP7211 I/R support"
+ depends on IRTTY_SIR && ARCH_EP7211 && IRDA && EXPERIMENTAL
+ help
+	  Say Y here if you want to build support for the Cirrus Logic
+	  EP7211 chipset's infrared module.
+
+
+
comment "Old SIR device drivers"
config IRPORT_SIR
@@ -355,7 +364,7 @@ config WINBOND_FIR
config TOSHIBA_FIR
tristate "Toshiba Type-O IR Port"
- depends on IRDA && PCI && !64BIT
+ depends on IRDA && PCI && !64BIT && VIRT_TO_BUS
help
Say Y here if you want to build support for the Toshiba Type-O IR
and Donau oboe chipsets. These chipsets are used by the Toshiba
diff --git a/drivers/net/irda/Makefile b/drivers/net/irda/Makefile
index 233a2f92373..2808ef5c7b7 100644
--- a/drivers/net/irda/Makefile
+++ b/drivers/net/irda/Makefile
@@ -45,6 +45,7 @@ obj-$(CONFIG_MCP2120_DONGLE) += mcp2120-sir.o
obj-$(CONFIG_ACT200L_DONGLE) += act200l-sir.o
obj-$(CONFIG_MA600_DONGLE) += ma600-sir.o
obj-$(CONFIG_TOIM3232_DONGLE) += toim3232-sir.o
+obj-$(CONFIG_EP7211_DONGLE) += ep7211-sir.o
obj-$(CONFIG_KINGSUN_DONGLE) += kingsun-sir.o
# The SIR helper module
diff --git a/drivers/net/irda/ep7211-sir.c b/drivers/net/irda/ep7211-sir.c
new file mode 100644
index 00000000000..831572429bb
--- /dev/null
+++ b/drivers/net/irda/ep7211-sir.c
@@ -0,0 +1,89 @@
+/*
+ * IR port driver for the Cirrus Logic EP7211 processor.
+ *
+ * Copyright 2001, Blue Mug Inc. All rights reserved.
+ * Copyright 2007, Samuel Ortiz <samuel@sortiz.org>
+ */
+#include <linux/module.h>
+#include <linux/delay.h>
+#include <linux/tty.h>
+#include <linux/init.h>
+#include <linux/spinlock.h>
+
+#include <net/irda/irda.h>
+#include <net/irda/irda_device.h>
+
+#include <asm/io.h>
+#include <asm/hardware.h>
+
+#include "sir-dev.h"
+
+#define MIN_DELAY 25 /* 15 us, but wait a little more to be sure */
+#define MAX_DELAY 10000 /* 1 ms */
+
+static int ep7211_open(struct sir_dev *dev);
+static int ep7211_close(struct sir_dev *dev);
+static int ep7211_change_speed(struct sir_dev *dev, unsigned speed);
+static int ep7211_reset(struct sir_dev *dev);
+
+static struct dongle_driver ep7211 = {
+ .owner = THIS_MODULE,
+ .driver_name = "EP7211 IR driver",
+ .type = IRDA_EP7211_DONGLE,
+ .open = ep7211_open,
+ .close = ep7211_close,
+ .reset = ep7211_reset,
+ .set_speed = ep7211_change_speed,
+};
+
+static int __init ep7211_sir_init(void)
+{
+ return irda_register_dongle(&ep7211);
+}
+
+static void __exit ep7211_sir_cleanup(void)
+{
+ irda_unregister_dongle(&ep7211);
+}
+
+static int ep7211_open(struct sir_dev *dev)
+{
+ unsigned int syscon;
+
+ /* Turn on the SIR encoder. */
+ syscon = clps_readl(SYSCON1);
+ syscon |= SYSCON1_SIREN;
+ clps_writel(syscon, SYSCON1);
+
+ return 0;
+}
+
+static int ep7211_close(struct sir_dev *dev)
+{
+ unsigned int syscon;
+
+ /* Turn off the SIR encoder. */
+ syscon = clps_readl(SYSCON1);
+ syscon &= ~SYSCON1_SIREN;
+ clps_writel(syscon, SYSCON1);
+
+ return 0;
+}
+
+static int ep7211_change_speed(struct sir_dev *dev, unsigned speed)
+{
+ return 0;
+}
+
+static int ep7211_reset(struct sir_dev *dev)
+{
+ return 0;
+}
+
+MODULE_AUTHOR("Samuel Ortiz <samuel@sortiz.org>");
+MODULE_DESCRIPTION("EP7211 IR dongle driver");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("irda-dongle-13"); /* IRDA_EP7211_DONGLE */
+
+module_init(ep7211_sir_init);
+module_exit(ep7211_sir_cleanup);
diff --git a/drivers/net/irda/irport.c b/drivers/net/irda/irport.c
index 3078c419cb0..20732458f5a 100644
--- a/drivers/net/irda/irport.c
+++ b/drivers/net/irda/irport.c
@@ -164,14 +164,13 @@ irport_open(int i, unsigned int iobase, unsigned int irq)
/* Allocate memory if needed */
if (self->tx_buff.truesize > 0) {
- self->tx_buff.head = kmalloc(self->tx_buff.truesize,
+ self->tx_buff.head = kzalloc(self->tx_buff.truesize,
GFP_KERNEL);
if (self->tx_buff.head == NULL) {
IRDA_ERROR("%s(), can't allocate memory for "
"transmit buffer!\n", __FUNCTION__);
goto err_out4;
}
- memset(self->tx_buff.head, 0, self->tx_buff.truesize);
}
self->tx_buff.data = self->tx_buff.head;
diff --git a/drivers/net/irda/irtty-sir.c b/drivers/net/irda/irtty-sir.c
index ad1857364d5..6f5f697ec9f 100644
--- a/drivers/net/irda/irtty-sir.c
+++ b/drivers/net/irda/irtty-sir.c
@@ -505,10 +505,9 @@ static int irtty_open(struct tty_struct *tty)
}
/* allocate private device info block */
- priv = kmalloc(sizeof(*priv), GFP_KERNEL);
+ priv = kzalloc(sizeof(*priv), GFP_KERNEL);
if (!priv)
goto out_put;
- memset(priv, 0, sizeof(*priv));
priv->magic = IRTTY_MAGIC;
priv->tty = tty;
diff --git a/drivers/net/iseries_veth.c b/drivers/net/iseries_veth.c
index 347d50cd77d..0433c41f902 100644
--- a/drivers/net/iseries_veth.c
+++ b/drivers/net/iseries_veth.c
@@ -822,10 +822,9 @@ static int veth_init_connection(u8 rlp)
|| ! HvLpConfig_doLpsCommunicateOnVirtualLan(this_lp, rlp) )
return 0;
- cnx = kmalloc(sizeof(*cnx), GFP_KERNEL);
+ cnx = kzalloc(sizeof(*cnx), GFP_KERNEL);
if (! cnx)
return -ENOMEM;
- memset(cnx, 0, sizeof(*cnx));
cnx->remote_lp = rlp;
spin_lock_init(&cnx->lock);
@@ -852,14 +851,13 @@ static int veth_init_connection(u8 rlp)
if (rc != 0)
return rc;
- msgs = kmalloc(VETH_NUMBUFFERS * sizeof(struct veth_msg), GFP_KERNEL);
+ msgs = kcalloc(VETH_NUMBUFFERS, sizeof(struct veth_msg), GFP_KERNEL);
if (! msgs) {
veth_error("Can't allocate buffers for LPAR %d.\n", rlp);
return -ENOMEM;
}
cnx->msgs = msgs;
- memset(msgs, 0, VETH_NUMBUFFERS * sizeof(struct veth_msg));
for (i = 0; i < VETH_NUMBUFFERS; i++) {
msgs[i].token = i;
diff --git a/drivers/net/lance.c b/drivers/net/lance.c
index a2f37e52b92..a4e5fab1262 100644
--- a/drivers/net/lance.c
+++ b/drivers/net/lance.c
@@ -533,11 +533,10 @@ static int __init lance_probe1(struct net_device *dev, int ioaddr, int irq, int
dev->base_addr = ioaddr;
/* Make certain the data structures used by the LANCE are aligned and DMAble. */
- lp = kmalloc(sizeof(*lp), GFP_DMA | GFP_KERNEL);
+ lp = kzalloc(sizeof(*lp), GFP_DMA | GFP_KERNEL);
if(lp==NULL)
return -ENODEV;
if (lance_debug > 6) printk(" (#0x%05lx)", (unsigned long)lp);
- memset(lp, 0, sizeof(*lp));
dev->priv = lp;
lp->name = chipname;
lp->rx_buffs = (unsigned long)kmalloc(PKT_BUF_SZ*RX_RING_SIZE,
diff --git a/drivers/net/lguest_net.c b/drivers/net/lguest_net.c
new file mode 100644
index 00000000000..112778652f7
--- /dev/null
+++ b/drivers/net/lguest_net.c
@@ -0,0 +1,354 @@
+/* A simple network driver for lguest.
+ *
+ * Copyright 2006 Rusty Russell <rusty@rustcorp.com.au> IBM Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+//#define DEBUG
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/module.h>
+#include <linux/mm_types.h>
+#include <linux/io.h>
+#include <linux/lguest_bus.h>
+
+#define SHARED_SIZE PAGE_SIZE
+#define MAX_LANS 4
+#define NUM_SKBS 8
+
+struct lguestnet_info
+{
+ /* The shared page(s). */
+ struct lguest_net *peer;
+ unsigned long peer_phys;
+ unsigned long mapsize;
+
+ /* The lguest_device I come from */
+ struct lguest_device *lgdev;
+
+ /* My peerid. */
+ unsigned int me;
+
+ /* Receive queue. */
+ struct sk_buff *skb[NUM_SKBS];
+ struct lguest_dma dma[NUM_SKBS];
+};
+
+/* How many bytes left in this page. */
+static unsigned int rest_of_page(void *data)
+{
+ return PAGE_SIZE - ((unsigned long)data % PAGE_SIZE);
+}
+
+/* Simple convention: offset 4 * peernum. */
+static unsigned long peer_key(struct lguestnet_info *info, unsigned peernum)
+{
+ return info->peer_phys + 4 * peernum;
+}
+
+static void skb_to_dma(const struct sk_buff *skb, unsigned int headlen,
+ struct lguest_dma *dma)
+{
+ unsigned int i, seg;
+
+ for (i = seg = 0; i < headlen; seg++, i += rest_of_page(skb->data+i)) {
+ dma->addr[seg] = virt_to_phys(skb->data + i);
+ dma->len[seg] = min((unsigned)(headlen - i),
+ rest_of_page(skb->data + i));
+ }
+ for (i = 0; i < skb_shinfo(skb)->nr_frags; i++, seg++) {
+ const skb_frag_t *f = &skb_shinfo(skb)->frags[i];
+ /* Should not happen with MTU less than 64k - 2 * PAGE_SIZE. */
+ if (seg == LGUEST_MAX_DMA_SECTIONS) {
+ printk("Woah dude! Megapacket!\n");
+ break;
+ }
+ dma->addr[seg] = page_to_phys(f->page) + f->page_offset;
+ dma->len[seg] = f->size;
+ }
+ if (seg < LGUEST_MAX_DMA_SECTIONS)
+ dma->len[seg] = 0;
+}
+
+/* We overload multicast bit to show promiscuous mode. */
+#define PROMISC_BIT 0x01
+
+static void lguestnet_set_multicast(struct net_device *dev)
+{
+ struct lguestnet_info *info = netdev_priv(dev);
+
+ if ((dev->flags & (IFF_PROMISC|IFF_ALLMULTI)) || dev->mc_count)
+ info->peer[info->me].mac[0] |= PROMISC_BIT;
+ else
+ info->peer[info->me].mac[0] &= ~PROMISC_BIT;
+}
+
+static int promisc(struct lguestnet_info *info, unsigned int peer)
+{
+ return info->peer[peer].mac[0] & PROMISC_BIT;
+}
+
+static int mac_eq(const unsigned char mac[ETH_ALEN],
+ struct lguestnet_info *info, unsigned int peer)
+{
+ /* Ignore multicast bit, which peer turns on to mean promisc. */
+ if ((info->peer[peer].mac[0] & (~PROMISC_BIT)) != mac[0])
+ return 0;
+ return memcmp(mac+1, info->peer[peer].mac+1, ETH_ALEN-1) == 0;
+}
+
+static void transfer_packet(struct net_device *dev,
+ struct sk_buff *skb,
+ unsigned int peernum)
+{
+ struct lguestnet_info *info = netdev_priv(dev);
+ struct lguest_dma dma;
+
+ skb_to_dma(skb, skb_headlen(skb), &dma);
+ pr_debug("xfer length %04x (%u)\n", htons(skb->len), skb->len);
+
+ lguest_send_dma(peer_key(info, peernum), &dma);
+ if (dma.used_len != skb->len) {
+ dev->stats.tx_carrier_errors++;
+ pr_debug("Bad xfer to peer %i: %i of %i (dma %p/%i)\n",
+ peernum, dma.used_len, skb->len,
+ (void *)dma.addr[0], dma.len[0]);
+ } else {
+ dev->stats.tx_bytes += skb->len;
+ dev->stats.tx_packets++;
+ }
+}
+
+static int unused_peer(const struct lguest_net peer[], unsigned int num)
+{
+ return peer[num].mac[0] == 0;
+}
+
+static int lguestnet_start_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+ unsigned int i;
+ int broadcast;
+ struct lguestnet_info *info = netdev_priv(dev);
+ const unsigned char *dest = ((struct ethhdr *)skb->data)->h_dest;
+
+ pr_debug("%s: xmit %02x:%02x:%02x:%02x:%02x:%02x\n",
+ dev->name, dest[0],dest[1],dest[2],dest[3],dest[4],dest[5]);
+
+ broadcast = is_multicast_ether_addr(dest);
+ for (i = 0; i < info->mapsize/sizeof(struct lguest_net); i++) {
+ if (i == info->me || unused_peer(info->peer, i))
+ continue;
+
+ if (!broadcast && !promisc(info, i) && !mac_eq(dest, info, i))
+ continue;
+
+ pr_debug("lguestnet %s: sending from %i to %i\n",
+ dev->name, info->me, i);
+ transfer_packet(dev, skb, i);
+ }
+ dev_kfree_skb(skb);
+ return 0;
+}
+
+/* Find a new skb to put in this slot in shared mem. */
+static int fill_slot(struct net_device *dev, unsigned int slot)
+{
+ struct lguestnet_info *info = netdev_priv(dev);
+ /* Try to create and register a new one. */
+ info->skb[slot] = netdev_alloc_skb(dev, ETH_HLEN + ETH_DATA_LEN);
+ if (!info->skb[slot]) {
+ printk("%s: could not fill slot %i\n", dev->name, slot);
+ return -ENOMEM;
+ }
+
+ skb_to_dma(info->skb[slot], ETH_HLEN + ETH_DATA_LEN, &info->dma[slot]);
+ wmb();
+ /* Now we tell hypervisor it can use the slot. */
+ info->dma[slot].used_len = 0;
+ return 0;
+}
+
+static irqreturn_t lguestnet_rcv(int irq, void *dev_id)
+{
+ struct net_device *dev = dev_id;
+ struct lguestnet_info *info = netdev_priv(dev);
+ unsigned int i, done = 0;
+
+ for (i = 0; i < ARRAY_SIZE(info->dma); i++) {
+ unsigned int length;
+ struct sk_buff *skb;
+
+ length = info->dma[i].used_len;
+ if (length == 0)
+ continue;
+
+ done++;
+ skb = info->skb[i];
+ fill_slot(dev, i);
+
+ if (length < ETH_HLEN || length > ETH_HLEN + ETH_DATA_LEN) {
+			pr_debug("%s: unbelievable skb len: %i\n",
+ dev->name, length);
+ dev_kfree_skb(skb);
+ continue;
+ }
+
+ skb_put(skb, length);
+ skb->protocol = eth_type_trans(skb, dev);
+ /* This is a reliable transport. */
+ if (dev->features & NETIF_F_NO_CSUM)
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+ pr_debug("Receiving skb proto 0x%04x len %i type %i\n",
+ ntohs(skb->protocol), skb->len, skb->pkt_type);
+
+ dev->stats.rx_bytes += skb->len;
+ dev->stats.rx_packets++;
+ netif_rx(skb);
+ }
+ return done ? IRQ_HANDLED : IRQ_NONE;
+}
+
+static int lguestnet_open(struct net_device *dev)
+{
+ int i;
+ struct lguestnet_info *info = netdev_priv(dev);
+
+ /* Set up our MAC address */
+ memcpy(info->peer[info->me].mac, dev->dev_addr, ETH_ALEN);
+
+ /* Turn on promisc mode if needed */
+ lguestnet_set_multicast(dev);
+
+ for (i = 0; i < ARRAY_SIZE(info->dma); i++) {
+ if (fill_slot(dev, i) != 0)
+ goto cleanup;
+ }
+ if (lguest_bind_dma(peer_key(info,info->me), info->dma,
+ NUM_SKBS, lgdev_irq(info->lgdev)) != 0)
+ goto cleanup;
+ return 0;
+
+cleanup:
+ while (--i >= 0)
+ dev_kfree_skb(info->skb[i]);
+ return -ENOMEM;
+}
+
+static int lguestnet_close(struct net_device *dev)
+{
+ unsigned int i;
+ struct lguestnet_info *info = netdev_priv(dev);
+
+ /* Clear all trace: others might deliver packets, we'll ignore it. */
+ memset(&info->peer[info->me], 0, sizeof(info->peer[info->me]));
+
+ /* Deregister sg lists. */
+ lguest_unbind_dma(peer_key(info, info->me), info->dma);
+ for (i = 0; i < ARRAY_SIZE(info->dma); i++)
+ dev_kfree_skb(info->skb[i]);
+ return 0;
+}
+
+static int lguestnet_probe(struct lguest_device *lgdev)
+{
+ int err, irqf = IRQF_SHARED;
+ struct net_device *dev;
+ struct lguestnet_info *info;
+ struct lguest_device_desc *desc = &lguest_devices[lgdev->index];
+
+ pr_debug("lguest_net: probing for device %i\n", lgdev->index);
+
+ dev = alloc_etherdev(sizeof(struct lguestnet_info));
+ if (!dev)
+ return -ENOMEM;
+
+ SET_MODULE_OWNER(dev);
+
+ /* Ethernet defaults with some changes */
+ ether_setup(dev);
+ dev->set_mac_address = NULL;
+
+ dev->dev_addr[0] = 0x02; /* set local assignment bit (IEEE802) */
+ dev->dev_addr[1] = 0x00;
+ memcpy(&dev->dev_addr[2], &lguest_data.guestid, 2);
+ dev->dev_addr[4] = 0x00;
+ dev->dev_addr[5] = 0x00;
+
+ dev->open = lguestnet_open;
+ dev->stop = lguestnet_close;
+ dev->hard_start_xmit = lguestnet_start_xmit;
+
+ /* Turning on/off promisc will call dev->set_multicast_list.
+ * We don't actually support multicast yet */
+ dev->set_multicast_list = lguestnet_set_multicast;
+ SET_NETDEV_DEV(dev, &lgdev->dev);
+ if (desc->features & LGUEST_NET_F_NOCSUM)
+ dev->features = NETIF_F_SG|NETIF_F_NO_CSUM;
+
+ info = netdev_priv(dev);
+ info->mapsize = PAGE_SIZE * desc->num_pages;
+ info->peer_phys = ((unsigned long)desc->pfn << PAGE_SHIFT);
+ info->lgdev = lgdev;
+ info->peer = lguest_map(info->peer_phys, desc->num_pages);
+ if (!info->peer) {
+ err = -ENOMEM;
+ goto free;
+ }
+
+ /* This stores our peerid (upper bits reserved for future). */
+ info->me = (desc->features & (info->mapsize-1));
+
+ err = register_netdev(dev);
+ if (err) {
+ pr_debug("lguestnet: registering device failed\n");
+ goto unmap;
+ }
+
+ if (lguest_devices[lgdev->index].features & LGUEST_DEVICE_F_RANDOMNESS)
+ irqf |= IRQF_SAMPLE_RANDOM;
+ if (request_irq(lgdev_irq(lgdev), lguestnet_rcv, irqf, "lguestnet",
+ dev) != 0) {
+ pr_debug("lguestnet: cannot get irq %i\n", lgdev_irq(lgdev));
+ goto unregister;
+ }
+
+ pr_debug("lguestnet: registered device %s\n", dev->name);
+ lgdev->private = dev;
+ return 0;
+
+unregister:
+ unregister_netdev(dev);
+unmap:
+ lguest_unmap(info->peer);
+free:
+ free_netdev(dev);
+ return err;
+}
+
+static struct lguest_driver lguestnet_drv = {
+ .name = "lguestnet",
+ .owner = THIS_MODULE,
+ .device_type = LGUEST_DEVICE_T_NET,
+ .probe = lguestnet_probe,
+};
+
+static __init int lguestnet_init(void)
+{
+ return register_lguest_driver(&lguestnet_drv);
+}
+module_init(lguestnet_init);
+
+MODULE_DESCRIPTION("Lguest network driver");
+MODULE_LICENSE("GPL");
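
The trickiest part of the new lguest driver above is skb_to_dma(): a linear buffer may straddle page boundaries, so it is chopped into DMA segments that each stay within a single page, with rest_of_page() bounding every step. A standalone sketch of that walk, under the assumption that virt_to_phys() can be replaced by the raw address since only the segmenting arithmetic is being illustrated:

#include <stdio.h>

#define PAGE_SIZE 4096UL
#define MAX_SECTIONS 16 /* stand-in for LGUEST_MAX_DMA_SECTIONS */

/* Bytes left between an address and the end of its page. */
static unsigned long rest_of_page(unsigned long data)
{
	return PAGE_SIZE - (data % PAGE_SIZE);
}

int main(void)
{
	unsigned long addr[MAX_SECTIONS], len[MAX_SECTIONS];
	unsigned long base = 0x12f80;  /* deliberately not page-aligned */
	unsigned long headlen = 6000;  /* spans two page boundaries */
	unsigned int i, seg;

	for (i = seg = 0; i < headlen; seg++, i += rest_of_page(base + i)) {
		addr[seg] = base + i;
		len[seg] = headlen - i < rest_of_page(base + i)
			 ? headlen - i : rest_of_page(base + i);
	}
	/* Prints three segments: 128, 4096 and 1776 bytes. */
	for (i = 0; i < seg; i++)
		printf("seg %u: addr=%#lx len=%lu\n", i, addr[i], len[i]);
	return 0;
}
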
diff --git a/drivers/net/mac89x0.c b/drivers/net/mac89x0.c
index 26a3b45a4a3..62c1c6262fe 100644
--- a/drivers/net/mac89x0.c
+++ b/drivers/net/mac89x0.c
@@ -608,7 +608,7 @@ module_param(debug, int, 0);
MODULE_PARM_DESC(debug, "CS89[02]0 debug level (0-5)");
MODULE_LICENSE("GPL");
-int
+int __init
init_module(void)
{
net_debug = debug;
diff --git a/drivers/net/mlx4/catas.c b/drivers/net/mlx4/catas.c
index 1bb088aeaf7..6b32ec94b3a 100644
--- a/drivers/net/mlx4/catas.c
+++ b/drivers/net/mlx4/catas.c
@@ -30,41 +30,133 @@
* SOFTWARE.
*/
+#include <linux/workqueue.h>
+
#include "mlx4.h"
-void mlx4_handle_catas_err(struct mlx4_dev *dev)
+enum {
+ MLX4_CATAS_POLL_INTERVAL = 5 * HZ,
+};
+
+static DEFINE_SPINLOCK(catas_lock);
+
+static LIST_HEAD(catas_list);
+static struct workqueue_struct *catas_wq;
+static struct work_struct catas_work;
+
+static int internal_err_reset = 1;
+module_param(internal_err_reset, int, 0644);
+MODULE_PARM_DESC(internal_err_reset,
+ "Reset device on internal errors if non-zero (default 1)");
+
+static void dump_err_buf(struct mlx4_dev *dev)
{
struct mlx4_priv *priv = mlx4_priv(dev);
int i;
- mlx4_err(dev, "Catastrophic error detected:\n");
+ mlx4_err(dev, "Internal error detected:\n");
for (i = 0; i < priv->fw.catas_size; ++i)
mlx4_err(dev, " buf[%02x]: %08x\n",
i, swab32(readl(priv->catas_err.map + i)));
+}
- mlx4_dispatch_event(dev, MLX4_EVENT_TYPE_LOCAL_CATAS_ERROR, 0, 0);
+static void poll_catas(unsigned long dev_ptr)
+{
+ struct mlx4_dev *dev = (struct mlx4_dev *) dev_ptr;
+ struct mlx4_priv *priv = mlx4_priv(dev);
+
+ if (readl(priv->catas_err.map)) {
+ dump_err_buf(dev);
+
+ mlx4_dispatch_event(dev, MLX4_EVENT_TYPE_LOCAL_CATAS_ERROR, 0, 0);
+
+ if (internal_err_reset) {
+ spin_lock(&catas_lock);
+ list_add(&priv->catas_err.list, &catas_list);
+ spin_unlock(&catas_lock);
+
+ queue_work(catas_wq, &catas_work);
+ }
+ } else
+ mod_timer(&priv->catas_err.timer,
+ round_jiffies(jiffies + MLX4_CATAS_POLL_INTERVAL));
}
-void mlx4_map_catas_buf(struct mlx4_dev *dev)
+static void catas_reset(struct work_struct *work)
+{
+ struct mlx4_priv *priv, *tmppriv;
+ struct mlx4_dev *dev;
+
+ LIST_HEAD(tlist);
+ int ret;
+
+ spin_lock_irq(&catas_lock);
+ list_splice_init(&catas_list, &tlist);
+ spin_unlock_irq(&catas_lock);
+
+ list_for_each_entry_safe(priv, tmppriv, &tlist, catas_err.list) {
+ ret = mlx4_restart_one(priv->dev.pdev);
+ dev = &priv->dev;
+ if (ret)
+ mlx4_err(dev, "Reset failed (%d)\n", ret);
+ else
+ mlx4_dbg(dev, "Reset succeeded\n");
+ }
+}
+
+void mlx4_start_catas_poll(struct mlx4_dev *dev)
{
struct mlx4_priv *priv = mlx4_priv(dev);
unsigned long addr;
+ INIT_LIST_HEAD(&priv->catas_err.list);
+ init_timer(&priv->catas_err.timer);
+ priv->catas_err.map = NULL;
+
addr = pci_resource_start(dev->pdev, priv->fw.catas_bar) +
priv->fw.catas_offset;
priv->catas_err.map = ioremap(addr, priv->fw.catas_size * 4);
- if (!priv->catas_err.map)
- mlx4_warn(dev, "Failed to map catastrophic error buffer at 0x%lx\n",
+ if (!priv->catas_err.map) {
+ mlx4_warn(dev, "Failed to map internal error buffer at 0x%lx\n",
addr);
+ return;
+ }
+ priv->catas_err.timer.data = (unsigned long) dev;
+ priv->catas_err.timer.function = poll_catas;
+ priv->catas_err.timer.expires =
+ round_jiffies(jiffies + MLX4_CATAS_POLL_INTERVAL);
+ add_timer(&priv->catas_err.timer);
}
-void mlx4_unmap_catas_buf(struct mlx4_dev *dev)
+void mlx4_stop_catas_poll(struct mlx4_dev *dev)
{
struct mlx4_priv *priv = mlx4_priv(dev);
+ del_timer_sync(&priv->catas_err.timer);
+
if (priv->catas_err.map)
iounmap(priv->catas_err.map);
+
+ spin_lock_irq(&catas_lock);
+ list_del(&priv->catas_err.list);
+ spin_unlock_irq(&catas_lock);
+}
+
+int __init mlx4_catas_init(void)
+{
+ INIT_WORK(&catas_work, catas_reset);
+
+ catas_wq = create_singlethread_workqueue("mlx4_err");
+ if (!catas_wq)
+ return -ENOMEM;
+
+ return 0;
+}
+
+void mlx4_catas_cleanup(void)
+{
+ destroy_workqueue(catas_wq);
}
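
catas_reset() above uses a common deferred-work shape: the poll timer only queues the failed device on a global list under a spinlock, and the workqueue later splices that list onto a private one (list_splice_init) so the slow restart runs without the lock held. A small userspace sketch of the splice-then-process idea, with a pthread mutex standing in for the spinlock and a singly-linked list for list_head (compile with -lpthread):

#include <pthread.h>
#include <stdio.h>

struct node { int id; struct node *next; };

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static struct node *pending; /* plays the role of catas_list */

static void reset_one(struct node *n)
{
	printf("resetting device %d\n", n->id);
}

static void process_all(void)
{
	struct node *local;

	pthread_mutex_lock(&lock);         /* splice under the lock... */
	local = pending;
	pending = NULL;
	pthread_mutex_unlock(&lock);

	for (; local; local = local->next) /* ...do the slow work outside it */
		reset_one(local);
}

int main(void)
{
	struct node a = { 1, NULL }, b = { 2, &a };

	pthread_mutex_lock(&lock);
	pending = &b;
	pthread_mutex_unlock(&lock);

	process_all();
	return 0;
}
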
diff --git a/drivers/net/mlx4/eq.c b/drivers/net/mlx4/eq.c
index 27a82cecd69..2095c843fa1 100644
--- a/drivers/net/mlx4/eq.c
+++ b/drivers/net/mlx4/eq.c
@@ -89,14 +89,12 @@ struct mlx4_eq_context {
(1ull << MLX4_EVENT_TYPE_PATH_MIG_FAILED) | \
(1ull << MLX4_EVENT_TYPE_WQ_INVAL_REQ_ERROR) | \
(1ull << MLX4_EVENT_TYPE_WQ_ACCESS_ERROR) | \
- (1ull << MLX4_EVENT_TYPE_LOCAL_CATAS_ERROR) | \
(1ull << MLX4_EVENT_TYPE_PORT_CHANGE) | \
(1ull << MLX4_EVENT_TYPE_ECC_DETECT) | \
(1ull << MLX4_EVENT_TYPE_SRQ_CATAS_ERROR) | \
(1ull << MLX4_EVENT_TYPE_SRQ_QP_LAST_WQE) | \
(1ull << MLX4_EVENT_TYPE_SRQ_LIMIT) | \
(1ull << MLX4_EVENT_TYPE_CMD))
-#define MLX4_CATAS_EVENT_MASK (1ull << MLX4_EVENT_TYPE_LOCAL_CATAS_ERROR)
struct mlx4_eqe {
u8 reserved1;
@@ -264,7 +262,7 @@ static irqreturn_t mlx4_interrupt(int irq, void *dev_ptr)
writel(priv->eq_table.clr_mask, priv->eq_table.clr_int);
- for (i = 0; i < MLX4_EQ_CATAS; ++i)
+ for (i = 0; i < MLX4_NUM_EQ; ++i)
work |= mlx4_eq_int(dev, &priv->eq_table.eq[i]);
return IRQ_RETVAL(work);
@@ -281,14 +279,6 @@ static irqreturn_t mlx4_msi_x_interrupt(int irq, void *eq_ptr)
return IRQ_HANDLED;
}
-static irqreturn_t mlx4_catas_interrupt(int irq, void *dev_ptr)
-{
- mlx4_handle_catas_err(dev_ptr);
-
- /* MSI-X vectors always belong to us */
- return IRQ_HANDLED;
-}
-
static int mlx4_MAP_EQ(struct mlx4_dev *dev, u64 event_mask, int unmap,
int eq_num)
{
@@ -490,11 +480,9 @@ static void mlx4_free_irqs(struct mlx4_dev *dev)
if (eq_table->have_irq)
free_irq(dev->pdev->irq, dev);
- for (i = 0; i < MLX4_EQ_CATAS; ++i)
+ for (i = 0; i < MLX4_NUM_EQ; ++i)
if (eq_table->eq[i].have_irq)
free_irq(eq_table->eq[i].irq, eq_table->eq + i);
- if (eq_table->eq[MLX4_EQ_CATAS].have_irq)
- free_irq(eq_table->eq[MLX4_EQ_CATAS].irq, dev);
}
static int __devinit mlx4_map_clr_int(struct mlx4_dev *dev)
@@ -598,32 +586,19 @@ int __devinit mlx4_init_eq_table(struct mlx4_dev *dev)
if (dev->flags & MLX4_FLAG_MSI_X) {
static const char *eq_name[] = {
[MLX4_EQ_COMP] = DRV_NAME " (comp)",
- [MLX4_EQ_ASYNC] = DRV_NAME " (async)",
- [MLX4_EQ_CATAS] = DRV_NAME " (catas)"
+ [MLX4_EQ_ASYNC] = DRV_NAME " (async)"
};
- err = mlx4_create_eq(dev, 1, MLX4_EQ_CATAS,
- &priv->eq_table.eq[MLX4_EQ_CATAS]);
- if (err)
- goto err_out_async;
-
- for (i = 0; i < MLX4_EQ_CATAS; ++i) {
+ for (i = 0; i < MLX4_NUM_EQ; ++i) {
err = request_irq(priv->eq_table.eq[i].irq,
mlx4_msi_x_interrupt,
0, eq_name[i], priv->eq_table.eq + i);
if (err)
- goto err_out_catas;
+ goto err_out_async;
priv->eq_table.eq[i].have_irq = 1;
}
- err = request_irq(priv->eq_table.eq[MLX4_EQ_CATAS].irq,
- mlx4_catas_interrupt, 0,
- eq_name[MLX4_EQ_CATAS], dev);
- if (err)
- goto err_out_catas;
-
- priv->eq_table.eq[MLX4_EQ_CATAS].have_irq = 1;
} else {
err = request_irq(dev->pdev->irq, mlx4_interrupt,
IRQF_SHARED, DRV_NAME, dev);
@@ -639,22 +614,11 @@ int __devinit mlx4_init_eq_table(struct mlx4_dev *dev)
mlx4_warn(dev, "MAP_EQ for async EQ %d failed (%d)\n",
priv->eq_table.eq[MLX4_EQ_ASYNC].eqn, err);
- for (i = 0; i < MLX4_EQ_CATAS; ++i)
+ for (i = 0; i < MLX4_NUM_EQ; ++i)
eq_set_ci(&priv->eq_table.eq[i], 1);
- if (dev->flags & MLX4_FLAG_MSI_X) {
- err = mlx4_MAP_EQ(dev, MLX4_CATAS_EVENT_MASK, 0,
- priv->eq_table.eq[MLX4_EQ_CATAS].eqn);
- if (err)
- mlx4_warn(dev, "MAP_EQ for catas EQ %d failed (%d)\n",
- priv->eq_table.eq[MLX4_EQ_CATAS].eqn, err);
- }
-
return 0;
-err_out_catas:
- mlx4_free_eq(dev, &priv->eq_table.eq[MLX4_EQ_CATAS]);
-
err_out_async:
mlx4_free_eq(dev, &priv->eq_table.eq[MLX4_EQ_ASYNC]);
@@ -675,19 +639,13 @@ void mlx4_cleanup_eq_table(struct mlx4_dev *dev)
struct mlx4_priv *priv = mlx4_priv(dev);
int i;
- if (dev->flags & MLX4_FLAG_MSI_X)
- mlx4_MAP_EQ(dev, MLX4_CATAS_EVENT_MASK, 1,
- priv->eq_table.eq[MLX4_EQ_CATAS].eqn);
-
mlx4_MAP_EQ(dev, MLX4_ASYNC_EVENT_MASK, 1,
priv->eq_table.eq[MLX4_EQ_ASYNC].eqn);
mlx4_free_irqs(dev);
- for (i = 0; i < MLX4_EQ_CATAS; ++i)
+ for (i = 0; i < MLX4_NUM_EQ; ++i)
mlx4_free_eq(dev, &priv->eq_table.eq[i]);
- if (dev->flags & MLX4_FLAG_MSI_X)
- mlx4_free_eq(dev, &priv->eq_table.eq[MLX4_EQ_CATAS]);
mlx4_unmap_clr_int(dev);
diff --git a/drivers/net/mlx4/intf.c b/drivers/net/mlx4/intf.c
index 9ae951bf6aa..be5d9e90ccf 100644
--- a/drivers/net/mlx4/intf.c
+++ b/drivers/net/mlx4/intf.c
@@ -142,6 +142,7 @@ int mlx4_register_device(struct mlx4_dev *dev)
mlx4_add_device(intf, priv);
mutex_unlock(&intf_mutex);
+ mlx4_start_catas_poll(dev);
return 0;
}
@@ -151,6 +152,7 @@ void mlx4_unregister_device(struct mlx4_dev *dev)
struct mlx4_priv *priv = mlx4_priv(dev);
struct mlx4_interface *intf;
+ mlx4_stop_catas_poll(dev);
mutex_lock(&intf_mutex);
list_for_each_entry(intf, &intf_list, list)
diff --git a/drivers/net/mlx4/main.c b/drivers/net/mlx4/main.c
index a4f2e0475a7..4dc9dc19b71 100644
--- a/drivers/net/mlx4/main.c
+++ b/drivers/net/mlx4/main.c
@@ -78,7 +78,7 @@ static const char mlx4_version[] __devinitdata =
static struct mlx4_profile default_profile = {
.num_qp = 1 << 16,
.num_srq = 1 << 16,
- .rdmarc_per_qp = 4,
+ .rdmarc_per_qp = 1 << 4,
.num_cq = 1 << 16,
.num_mcg = 1 << 13,
.num_mpt = 1 << 17,
@@ -583,13 +583,11 @@ static int __devinit mlx4_setup_hca(struct mlx4_dev *dev)
goto err_pd_table_free;
}
- mlx4_map_catas_buf(dev);
-
err = mlx4_init_eq_table(dev);
if (err) {
mlx4_err(dev, "Failed to initialize "
"event queue table, aborting.\n");
- goto err_catas_buf;
+ goto err_mr_table_free;
}
err = mlx4_cmd_use_events(dev);
@@ -659,8 +657,7 @@ err_cmd_poll:
err_eq_table_free:
mlx4_cleanup_eq_table(dev);
-err_catas_buf:
- mlx4_unmap_catas_buf(dev);
+err_mr_table_free:
mlx4_cleanup_mr_table(dev);
err_pd_table_free:
@@ -836,9 +833,6 @@ err_cleanup:
mlx4_cleanup_cq_table(dev);
mlx4_cmd_use_polling(dev);
mlx4_cleanup_eq_table(dev);
-
- mlx4_unmap_catas_buf(dev);
-
mlx4_cleanup_mr_table(dev);
mlx4_cleanup_pd_table(dev);
mlx4_cleanup_uar_table(dev);
@@ -885,9 +879,6 @@ static void __devexit mlx4_remove_one(struct pci_dev *pdev)
mlx4_cleanup_cq_table(dev);
mlx4_cmd_use_polling(dev);
mlx4_cleanup_eq_table(dev);
-
- mlx4_unmap_catas_buf(dev);
-
mlx4_cleanup_mr_table(dev);
mlx4_cleanup_pd_table(dev);
@@ -908,6 +899,12 @@ static void __devexit mlx4_remove_one(struct pci_dev *pdev)
}
}
+int mlx4_restart_one(struct pci_dev *pdev)
+{
+ mlx4_remove_one(pdev);
+ return mlx4_init_one(pdev, NULL);
+}
+
static struct pci_device_id mlx4_pci_table[] = {
{ PCI_VDEVICE(MELLANOX, 0x6340) }, /* MT25408 "Hermon" SDR */
{ PCI_VDEVICE(MELLANOX, 0x634a) }, /* MT25408 "Hermon" DDR */
@@ -930,6 +927,10 @@ static int __init mlx4_init(void)
{
int ret;
+ ret = mlx4_catas_init();
+ if (ret)
+ return ret;
+
ret = pci_register_driver(&mlx4_driver);
return ret < 0 ? ret : 0;
}
@@ -937,6 +938,7 @@ static int __init mlx4_init(void)
static void __exit mlx4_cleanup(void)
{
pci_unregister_driver(&mlx4_driver);
+ mlx4_catas_cleanup();
}
module_init(mlx4_init);
diff --git a/drivers/net/mlx4/mlx4.h b/drivers/net/mlx4/mlx4.h
index d9c91a71fc8..be304a7c2c9 100644
--- a/drivers/net/mlx4/mlx4.h
+++ b/drivers/net/mlx4/mlx4.h
@@ -39,6 +39,7 @@
#include <linux/mutex.h>
#include <linux/radix-tree.h>
+#include <linux/timer.h>
#include <linux/mlx4/device.h>
#include <linux/mlx4/doorbell.h>
@@ -67,7 +68,6 @@ enum {
enum {
MLX4_EQ_ASYNC,
MLX4_EQ_COMP,
- MLX4_EQ_CATAS,
MLX4_NUM_EQ
};
@@ -248,7 +248,8 @@ struct mlx4_mcg_table {
struct mlx4_catas_err {
u32 __iomem *map;
- int size;
+ struct timer_list timer;
+ struct list_head list;
};
struct mlx4_priv {
@@ -311,9 +312,11 @@ void mlx4_cleanup_qp_table(struct mlx4_dev *dev);
void mlx4_cleanup_srq_table(struct mlx4_dev *dev);
void mlx4_cleanup_mcg_table(struct mlx4_dev *dev);
-void mlx4_map_catas_buf(struct mlx4_dev *dev);
-void mlx4_unmap_catas_buf(struct mlx4_dev *dev);
-
+void mlx4_start_catas_poll(struct mlx4_dev *dev);
+void mlx4_stop_catas_poll(struct mlx4_dev *dev);
+int mlx4_catas_init(void);
+void mlx4_catas_cleanup(void);
+int mlx4_restart_one(struct pci_dev *pdev);
int mlx4_register_device(struct mlx4_dev *dev);
void mlx4_unregister_device(struct mlx4_dev *dev);
void mlx4_dispatch_event(struct mlx4_dev *dev, enum mlx4_event type,
diff --git a/drivers/net/ni5010.c b/drivers/net/ni5010.c
index 3d5b4232f65..22a3b3dc7d8 100644
--- a/drivers/net/ni5010.c
+++ b/drivers/net/ni5010.c
@@ -670,14 +670,10 @@ static void ni5010_set_multicast_list(struct net_device *dev)
PRINTK2((KERN_DEBUG "%s: entering set_multicast_list\n", dev->name));
- if (dev->flags&IFF_PROMISC || dev->flags&IFF_ALLMULTI) {
+ if (dev->flags&IFF_PROMISC || dev->flags&IFF_ALLMULTI || dev->mc_list) {
dev->flags |= IFF_PROMISC;
outb(RMD_PROMISC, EDLC_RMODE); /* Enable promiscuous mode */
PRINTK((KERN_DEBUG "%s: Entering promiscuous mode\n", dev->name));
- } else if (dev->mc_list) {
- /* Sorry, multicast not supported */
- PRINTK((KERN_DEBUG "%s: No multicast, entering broadcast mode\n", dev->name));
- outb(RMD_BROADCAST, EDLC_RMODE);
} else {
PRINTK((KERN_DEBUG "%s: Entering broadcast mode\n", dev->name));
outb(RMD_BROADCAST, EDLC_RMODE); /* Disable promiscuous mode, use normal mode */
diff --git a/drivers/net/ns83820.c b/drivers/net/ns83820.c
index 104aab3c957..ea80e6cb3de 100644
--- a/drivers/net/ns83820.c
+++ b/drivers/net/ns83820.c
@@ -1582,7 +1582,7 @@ static void ns83820_set_multicast(struct net_device *ndev)
else
and_mask &= ~(RFCR_AAU | RFCR_AAM);
- if (ndev->flags & IFF_ALLMULTI)
+ if (ndev->flags & IFF_ALLMULTI || ndev->mc_count)
or_mask |= RFCR_AAM;
else
and_mask &= ~RFCR_AAM;
diff --git a/drivers/net/pcmcia/com20020_cs.c b/drivers/net/pcmcia/com20020_cs.c
index 0d1c7a41c9c..ea9414c4d90 100644
--- a/drivers/net/pcmcia/com20020_cs.c
+++ b/drivers/net/pcmcia/com20020_cs.c
@@ -147,7 +147,7 @@ static int com20020_probe(struct pcmcia_device *p_dev)
DEBUG(0, "com20020_attach()\n");
/* Create new network device */
- info = kmalloc(sizeof(struct com20020_dev_t), GFP_KERNEL);
+ info = kzalloc(sizeof(struct com20020_dev_t), GFP_KERNEL);
if (!info)
goto fail_alloc_info;
@@ -155,7 +155,6 @@ static int com20020_probe(struct pcmcia_device *p_dev)
if (!dev)
goto fail_alloc_dev;
- memset(info, 0, sizeof(struct com20020_dev_t));
lp = dev->priv;
lp->timeout = timeout;
lp->backplane = backplane;
diff --git a/drivers/net/pcmcia/ibmtr_cs.c b/drivers/net/pcmcia/ibmtr_cs.c
index 4ecb8ca5a99..4eafa4f42cf 100644
--- a/drivers/net/pcmcia/ibmtr_cs.c
+++ b/drivers/net/pcmcia/ibmtr_cs.c
@@ -146,9 +146,8 @@ static int __devinit ibmtr_attach(struct pcmcia_device *link)
DEBUG(0, "ibmtr_attach()\n");
/* Create new token-ring device */
- info = kmalloc(sizeof(*info), GFP_KERNEL);
+ info = kzalloc(sizeof(*info), GFP_KERNEL);
if (!info) return -ENOMEM;
- memset(info,0,sizeof(*info));
dev = alloc_trdev(sizeof(struct tok_info));
if (!dev) {
kfree(info);
diff --git a/drivers/net/phy/vitesse.c b/drivers/net/phy/vitesse.c
index 596222b260d..6a538564791 100644
--- a/drivers/net/phy/vitesse.c
+++ b/drivers/net/phy/vitesse.c
@@ -21,6 +21,10 @@
/* Vitesse Extended Control Register 1 */
#define MII_VSC8244_EXT_CON1 0x17
#define MII_VSC8244_EXTCON1_INIT 0x0000
+#define MII_VSC8244_EXTCON1_TX_SKEW_MASK 0x0c00
+#define MII_VSC8244_EXTCON1_RX_SKEW_MASK 0x0300
+#define MII_VSC8244_EXTCON1_TX_SKEW 0x0800
+#define MII_VSC8244_EXTCON1_RX_SKEW 0x0200
/* Vitesse Interrupt Mask Register */
#define MII_VSC8244_IMASK 0x19
@@ -39,7 +43,7 @@
/* Vitesse Auxiliary Control/Status Register */
#define MII_VSC8244_AUX_CONSTAT 0x1c
-#define MII_VSC8244_AUXCONSTAT_INIT 0x0004
+#define MII_VSC8244_AUXCONSTAT_INIT 0x0000
#define MII_VSC8244_AUXCONSTAT_DUPLEX 0x0020
#define MII_VSC8244_AUXCONSTAT_SPEED 0x0018
#define MII_VSC8244_AUXCONSTAT_GBIT 0x0010
@@ -51,6 +55,7 @@ MODULE_LICENSE("GPL");
static int vsc824x_config_init(struct phy_device *phydev)
{
+ int extcon;
int err;
err = phy_write(phydev, MII_VSC8244_AUX_CONSTAT,
@@ -58,14 +63,34 @@ static int vsc824x_config_init(struct phy_device *phydev)
if (err < 0)
return err;
- err = phy_write(phydev, MII_VSC8244_EXT_CON1,
- MII_VSC8244_EXTCON1_INIT);
+ extcon = phy_read(phydev, MII_VSC8244_EXT_CON1);
+
+ if (extcon < 0)
+		return extcon;
+
+ extcon &= ~(MII_VSC8244_EXTCON1_TX_SKEW_MASK |
+ MII_VSC8244_EXTCON1_RX_SKEW_MASK);
+
+ if (phydev->interface == PHY_INTERFACE_MODE_RGMII_ID)
+ extcon |= (MII_VSC8244_EXTCON1_TX_SKEW |
+ MII_VSC8244_EXTCON1_RX_SKEW);
+
+ err = phy_write(phydev, MII_VSC8244_EXT_CON1, extcon);
+
return err;
}
static int vsc824x_ack_interrupt(struct phy_device *phydev)
{
- int err = phy_read(phydev, MII_VSC8244_ISTAT);
+ int err = 0;
+
+ /*
+ * Don't bother to ACK the interrupts if interrupts
+ * are disabled. The 824x cannot clear the interrupts
+ * if they are disabled.
+ */
+ if (phydev->interrupts == PHY_INTERRUPT_ENABLED)
+ err = phy_read(phydev, MII_VSC8244_ISTAT);
return (err < 0) ? err : 0;
}
@@ -77,8 +102,19 @@ static int vsc824x_config_intr(struct phy_device *phydev)
if (phydev->interrupts == PHY_INTERRUPT_ENABLED)
err = phy_write(phydev, MII_VSC8244_IMASK,
MII_VSC8244_IMASK_MASK);
- else
+ else {
+ /*
+		 * The Vitesse PHY cannot clear the interrupts
+		 * once they have been disabled, so we clear them first.
+ */
+ err = phy_read(phydev, MII_VSC8244_ISTAT);
+
+		if (err < 0)
+ return err;
+
err = phy_write(phydev, MII_VSC8244_IMASK, 0);
+ }
+
return err;
}
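
The EXT_CON1 change above replaces a blind register write with a read-modify-write: read the register, clear only the skew mask bits, set them when the interface is RGMII-ID, and write the result back so unrelated bits survive. The bit arithmetic in isolation, using the mask values from the patch and a made-up starting register value:

#include <stdio.h>

#define TX_SKEW_MASK 0x0c00
#define RX_SKEW_MASK 0x0300
#define TX_SKEW      0x0800
#define RX_SKEW      0x0200

int main(void)
{
	unsigned int reg = 0xa5ff; /* hypothetical current register contents */
	int rgmii_id = 1;          /* pretend the PHY interface is RGMII-ID */

	reg &= ~(TX_SKEW_MASK | RX_SKEW_MASK); /* clear both skew fields */
	if (rgmii_id)
		reg |= TX_SKEW | RX_SKEW;      /* enable TX and RX clock skew */

	printf("new value: 0x%04x\n", reg);    /* 0xaaff: other bits untouched */
	return 0;
}
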
diff --git a/drivers/net/ppp_async.c b/drivers/net/ppp_async.c
index caabbc408c3..27f5b904f48 100644
--- a/drivers/net/ppp_async.c
+++ b/drivers/net/ppp_async.c
@@ -159,12 +159,11 @@ ppp_asynctty_open(struct tty_struct *tty)
int err;
err = -ENOMEM;
- ap = kmalloc(sizeof(*ap), GFP_KERNEL);
+ ap = kzalloc(sizeof(*ap), GFP_KERNEL);
if (ap == 0)
goto out;
/* initialize the asyncppp structure */
- memset(ap, 0, sizeof(*ap));
ap->tty = tty;
ap->mru = PPP_MRU;
spin_lock_init(&ap->xmit_lock);
diff --git a/drivers/net/ppp_deflate.c b/drivers/net/ppp_deflate.c
index 72c8d6628f5..eb98b661efb 100644
--- a/drivers/net/ppp_deflate.c
+++ b/drivers/net/ppp_deflate.c
@@ -121,12 +121,11 @@ static void *z_comp_alloc(unsigned char *options, int opt_len)
if (w_size < DEFLATE_MIN_SIZE || w_size > DEFLATE_MAX_SIZE)
return NULL;
- state = kmalloc(sizeof(*state),
+ state = kzalloc(sizeof(*state),
GFP_KERNEL);
if (state == NULL)
return NULL;
- memset (state, 0, sizeof (struct ppp_deflate_state));
state->strm.next_in = NULL;
state->w_size = w_size;
state->strm.workspace = vmalloc(zlib_deflate_workspacesize());
@@ -341,11 +340,10 @@ static void *z_decomp_alloc(unsigned char *options, int opt_len)
if (w_size < DEFLATE_MIN_SIZE || w_size > DEFLATE_MAX_SIZE)
return NULL;
- state = kmalloc(sizeof(*state), GFP_KERNEL);
+ state = kzalloc(sizeof(*state), GFP_KERNEL);
if (state == NULL)
return NULL;
- memset (state, 0, sizeof (struct ppp_deflate_state));
state->w_size = w_size;
state->strm.next_out = NULL;
state->strm.workspace = kmalloc(zlib_inflate_workspacesize(),
diff --git a/drivers/net/ppp_generic.c b/drivers/net/ppp_generic.c
index 3ef0092dc09..ef3325b6923 100644
--- a/drivers/net/ppp_generic.c
+++ b/drivers/net/ppp_generic.c
@@ -2684,8 +2684,7 @@ static void __exit ppp_cleanup(void)
if (atomic_read(&ppp_unit_count) || atomic_read(&channel_count))
printk(KERN_ERR "PPP: removing module but units remain!\n");
cardmap_destroy(&all_ppp_units);
- if (unregister_chrdev(PPP_MAJOR, "ppp") != 0)
- printk(KERN_ERR "PPP: failed to unregister PPP device\n");
+ unregister_chrdev(PPP_MAJOR, "ppp");
device_destroy(ppp_class, MKDEV(PPP_MAJOR, 0));
class_destroy(ppp_class);
}
diff --git a/drivers/net/ppp_mppe.c b/drivers/net/ppp_mppe.c
index d5bdd257465..f79cf87a2bf 100644
--- a/drivers/net/ppp_mppe.c
+++ b/drivers/net/ppp_mppe.c
@@ -200,11 +200,10 @@ static void *mppe_alloc(unsigned char *options, int optlen)
|| options[0] != CI_MPPE || options[1] != CILEN_MPPE)
goto out;
- state = kmalloc(sizeof(*state), GFP_KERNEL);
+ state = kzalloc(sizeof(*state), GFP_KERNEL);
if (state == NULL)
goto out;
- memset(state, 0, sizeof(*state));
state->arc4 = crypto_alloc_blkcipher("ecb(arc4)", 0, CRYPTO_ALG_ASYNC);
if (IS_ERR(state->arc4)) {
diff --git a/drivers/net/ppp_synctty.c b/drivers/net/ppp_synctty.c
index 5918fab3834..ce64032a465 100644
--- a/drivers/net/ppp_synctty.c
+++ b/drivers/net/ppp_synctty.c
@@ -207,13 +207,12 @@ ppp_sync_open(struct tty_struct *tty)
struct syncppp *ap;
int err;
- ap = kmalloc(sizeof(*ap), GFP_KERNEL);
+ ap = kzalloc(sizeof(*ap), GFP_KERNEL);
err = -ENOMEM;
if (ap == 0)
goto out;
/* initialize the syncppp structure */
- memset(ap, 0, sizeof(*ap));
ap->tty = tty;
ap->mru = PPP_MRU;
spin_lock_init(&ap->xmit_lock);
diff --git a/drivers/net/pppol2tp.c b/drivers/net/pppol2tp.c
index 5891a0fbdc8..f87176055d0 100644
--- a/drivers/net/pppol2tp.c
+++ b/drivers/net/pppol2tp.c
@@ -824,6 +824,7 @@ static int pppol2tp_sendmsg(struct kiocb *iocb, struct socket *sock, struct msgh
struct pppol2tp_session *session;
struct pppol2tp_tunnel *tunnel;
struct udphdr *uh;
+ unsigned int len;
error = -ENOTCONN;
if (sock_flag(sk, SOCK_DEAD) || !(sk->sk_state & PPPOX_CONNECTED))
@@ -912,14 +913,15 @@ static int pppol2tp_sendmsg(struct kiocb *iocb, struct socket *sock, struct msgh
}
/* Queue the packet to IP for output */
+ len = skb->len;
error = ip_queue_xmit(skb, 1);
/* Update stats */
if (error >= 0) {
tunnel->stats.tx_packets++;
- tunnel->stats.tx_bytes += skb->len;
+ tunnel->stats.tx_bytes += len;
session->stats.tx_packets++;
- session->stats.tx_bytes += skb->len;
+ session->stats.tx_bytes += len;
} else {
tunnel->stats.tx_errors++;
session->stats.tx_errors++;
@@ -958,6 +960,7 @@ static int pppol2tp_xmit(struct ppp_channel *chan, struct sk_buff *skb)
__wsum csum = 0;
struct sk_buff *skb2 = NULL;
struct udphdr *uh;
+ unsigned int len;
if (sock_flag(sk, SOCK_DEAD) || !(sk->sk_state & PPPOX_CONNECTED))
goto abort;
@@ -1046,18 +1049,25 @@ static int pppol2tp_xmit(struct ppp_channel *chan, struct sk_buff *skb)
printk("\n");
}
+ memset(&(IPCB(skb2)->opt), 0, sizeof(IPCB(skb2)->opt));
+ IPCB(skb2)->flags &= ~(IPSKB_XFRM_TUNNEL_SIZE | IPSKB_XFRM_TRANSFORMED |
+ IPSKB_REROUTED);
+ nf_reset(skb2);
+
/* Get routing info from the tunnel socket */
+ dst_release(skb2->dst);
skb2->dst = sk_dst_get(sk_tun);
/* Queue the packet to IP for output */
+ len = skb2->len;
rc = ip_queue_xmit(skb2, 1);
/* Update stats */
if (rc >= 0) {
tunnel->stats.tx_packets++;
- tunnel->stats.tx_bytes += skb2->len;
+ tunnel->stats.tx_bytes += len;
session->stats.tx_packets++;
- session->stats.tx_bytes += skb2->len;
+ session->stats.tx_bytes += len;
} else {
tunnel->stats.tx_errors++;
session->stats.tx_errors++;
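
The pppol2tp fix above caches skb->len before calling ip_queue_xmit(): once the packet is handed to the stack it may already have been freed or modified, so reading skb->len afterwards for the statistics is a use-after-free. The same discipline in a self-contained sketch, with a hypothetical consume_packet() standing in for ip_queue_xmit():

#include <stdio.h>
#include <stdlib.h>

struct packet { unsigned int len; char *data; };

/* Takes ownership of pkt, like ip_queue_xmit(); caller must not touch it again. */
static int consume_packet(struct packet *pkt)
{
	free(pkt->data);
	free(pkt);
	return 0;
}

int main(void)
{
	struct packet *pkt = malloc(sizeof(*pkt));
	unsigned long tx_bytes = 0;
	unsigned int len;

	if (!pkt)
		return 1;
	pkt->len = 1400;
	pkt->data = calloc(1, pkt->len);

	len = pkt->len;                /* save before handing off ownership */
	if (consume_packet(pkt) >= 0)
		tx_bytes += len;       /* NOT pkt->len: pkt is gone by now */

	printf("tx_bytes=%lu\n", tx_bytes);
	return 0;
}
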
diff --git a/drivers/net/saa9730.c b/drivers/net/saa9730.c
index 451486b32f2..7dae4d40497 100644
--- a/drivers/net/saa9730.c
+++ b/drivers/net/saa9730.c
@@ -940,15 +940,14 @@ static void lan_saa9730_set_multicast(struct net_device *dev)
CAM_CONTROL_GROUP_ACC | CAM_CONTROL_BROAD_ACC,
&lp->lan_saa9730_regs->CamCtl);
} else {
- if (dev->flags & IFF_ALLMULTI) {
+ if (dev->flags & IFF_ALLMULTI || dev->mc_count) {
/* accept all multicast packets */
- writel(CAM_CONTROL_COMP_EN | CAM_CONTROL_GROUP_ACC |
- CAM_CONTROL_BROAD_ACC,
- &lp->lan_saa9730_regs->CamCtl);
- } else {
/*
* Will handle the multicast stuff later. -carstenl
*/
+ writel(CAM_CONTROL_COMP_EN | CAM_CONTROL_GROUP_ACC |
+ CAM_CONTROL_BROAD_ACC,
+ &lp->lan_saa9730_regs->CamCtl);
}
}
diff --git a/drivers/net/shaper.c b/drivers/net/shaper.c
index e886e8d7cfd..4c3d98ff4cd 100644
--- a/drivers/net/shaper.c
+++ b/drivers/net/shaper.c
@@ -600,10 +600,9 @@ static int __init shaper_init(void)
return -ENODEV;
alloc_size = sizeof(*dev) * shapers;
- devs = kmalloc(alloc_size, GFP_KERNEL);
+ devs = kzalloc(alloc_size, GFP_KERNEL);
if (!devs)
return -ENOMEM;
- memset(devs, 0, alloc_size);
for (i = 0; i < shapers; i++) {
diff --git a/drivers/net/sky2.c b/drivers/net/sky2.c
index a2f32151559..13f08a390e1 100644
--- a/drivers/net/sky2.c
+++ b/drivers/net/sky2.c
@@ -692,6 +692,7 @@ static void sky2_mac_init(struct sky2_hw *hw, unsigned port)
{
struct sky2_port *sky2 = netdev_priv(hw->dev[port]);
u16 reg;
+ u32 rx_reg;
int i;
const u8 *addr = hw->dev[port]->dev_addr;
@@ -768,11 +769,11 @@ static void sky2_mac_init(struct sky2_hw *hw, unsigned port)
/* Configure Rx MAC FIFO */
sky2_write8(hw, SK_REG(port, RX_GMF_CTRL_T), GMF_RST_CLR);
- reg = GMF_OPER_ON | GMF_RX_F_FL_ON;
+ rx_reg = GMF_OPER_ON | GMF_RX_F_FL_ON;
if (hw->chip_id == CHIP_ID_YUKON_EX)
- reg |= GMF_RX_OVER_ON;
+ rx_reg |= GMF_RX_OVER_ON;
- sky2_write32(hw, SK_REG(port, RX_GMF_CTRL_T), reg);
+ sky2_write32(hw, SK_REG(port, RX_GMF_CTRL_T), rx_reg);
/* Flush Rx MAC FIFO on any flow control or error */
sky2_write16(hw, SK_REG(port, RX_GMF_FL_MSK), GMR_FS_ANY_ERR);
diff --git a/drivers/net/sunvnet.c b/drivers/net/sunvnet.c
index 8a667c13fae..61f98251fea 100644
--- a/drivers/net/sunvnet.c
+++ b/drivers/net/sunvnet.c
@@ -12,6 +12,7 @@
#include <linux/netdevice.h>
#include <linux/ethtool.h>
#include <linux/etherdevice.h>
+#include <linux/mutex.h>
#include <asm/vio.h>
#include <asm/ldc.h>
@@ -458,6 +459,22 @@ static int vnet_nack(struct vnet_port *port, void *msgbuf)
return 0;
}
+static int handle_mcast(struct vnet_port *port, void *msgbuf)
+{
+ struct vio_net_mcast_info *pkt = msgbuf;
+
+ if (pkt->tag.stype != VIO_SUBTYPE_ACK)
+ printk(KERN_ERR PFX "%s: Got unexpected MCAST reply "
+ "[%02x:%02x:%04x:%08x]\n",
+ port->vp->dev->name,
+ pkt->tag.type,
+ pkt->tag.stype,
+ pkt->tag.stype_env,
+ pkt->tag.sid);
+
+ return 0;
+}
+
static void maybe_tx_wakeup(struct vnet *vp)
{
struct net_device *dev = vp->dev;
@@ -497,6 +514,8 @@ static void vnet_event(void *arg, int event)
vio_link_state_change(vio, event);
spin_unlock_irqrestore(&vio->lock, flags);
+ if (event == LDC_EVENT_RESET)
+ vio_port_up(vio);
return;
}
@@ -541,7 +560,10 @@ static void vnet_event(void *arg, int event)
err = vnet_nack(port, &msgbuf);
}
} else if (msgbuf.tag.type == VIO_TYPE_CTRL) {
- err = vio_control_pkt_engine(vio, &msgbuf);
+ if (msgbuf.tag.stype_env == VNET_MCAST_INFO)
+ err = handle_mcast(port, &msgbuf);
+ else
+ err = vio_control_pkt_engine(vio, &msgbuf);
if (err)
break;
} else {
@@ -728,9 +750,122 @@ static int vnet_close(struct net_device *dev)
return 0;
}
+static struct vnet_mcast_entry *__vnet_mc_find(struct vnet *vp, u8 *addr)
+{
+ struct vnet_mcast_entry *m;
+
+ for (m = vp->mcast_list; m; m = m->next) {
+ if (!memcmp(m->addr, addr, ETH_ALEN))
+ return m;
+ }
+ return NULL;
+}
+
+static void __update_mc_list(struct vnet *vp, struct net_device *dev)
+{
+ struct dev_addr_list *p;
+
+ for (p = dev->mc_list; p; p = p->next) {
+ struct vnet_mcast_entry *m;
+
+ m = __vnet_mc_find(vp, p->dmi_addr);
+ if (m) {
+ m->hit = 1;
+ continue;
+ }
+
+ if (!m) {
+ m = kzalloc(sizeof(*m), GFP_ATOMIC);
+ if (!m)
+ continue;
+ memcpy(m->addr, p->dmi_addr, ETH_ALEN);
+ m->hit = 1;
+
+ m->next = vp->mcast_list;
+ vp->mcast_list = m;
+ }
+ }
+}
+
+static void __send_mc_list(struct vnet *vp, struct vnet_port *port)
+{
+ struct vio_net_mcast_info info;
+ struct vnet_mcast_entry *m, **pp;
+ int n_addrs;
+
+ memset(&info, 0, sizeof(info));
+
+ info.tag.type = VIO_TYPE_CTRL;
+ info.tag.stype = VIO_SUBTYPE_INFO;
+ info.tag.stype_env = VNET_MCAST_INFO;
+ info.tag.sid = vio_send_sid(&port->vio);
+ info.set = 1;
+
+ n_addrs = 0;
+ for (m = vp->mcast_list; m; m = m->next) {
+ if (m->sent)
+ continue;
+ m->sent = 1;
+ memcpy(&info.mcast_addr[n_addrs * ETH_ALEN],
+ m->addr, ETH_ALEN);
+ if (++n_addrs == VNET_NUM_MCAST) {
+ info.count = n_addrs;
+
+ (void) vio_ldc_send(&port->vio, &info,
+ sizeof(info));
+ n_addrs = 0;
+ }
+ }
+ if (n_addrs) {
+ info.count = n_addrs;
+ (void) vio_ldc_send(&port->vio, &info, sizeof(info));
+ }
+
+ info.set = 0;
+
+ n_addrs = 0;
+ pp = &vp->mcast_list;
+ while ((m = *pp) != NULL) {
+ if (m->hit) {
+ m->hit = 0;
+ pp = &m->next;
+ continue;
+ }
+
+ memcpy(&info.mcast_addr[n_addrs * ETH_ALEN],
+ m->addr, ETH_ALEN);
+ if (++n_addrs == VNET_NUM_MCAST) {
+ info.count = n_addrs;
+ (void) vio_ldc_send(&port->vio, &info,
+ sizeof(info));
+ n_addrs = 0;
+ }
+
+ *pp = m->next;
+ kfree(m);
+ }
+ if (n_addrs) {
+ info.count = n_addrs;
+ (void) vio_ldc_send(&port->vio, &info, sizeof(info));
+ }
+}
+
static void vnet_set_rx_mode(struct net_device *dev)
{
- /* XXX Implement multicast support XXX */
+ struct vnet *vp = netdev_priv(dev);
+ struct vnet_port *port;
+ unsigned long flags;
+
+ spin_lock_irqsave(&vp->lock, flags);
+ if (!list_empty(&vp->port_list)) {
+ port = list_entry(vp->port_list.next, struct vnet_port, list);
+
+ if (port->switch_port) {
+ __update_mc_list(vp, dev);
+ __send_mc_list(vp, port);
+ }
+ }
+ spin_unlock_irqrestore(&vp->lock, flags);
}
static int vnet_change_mtu(struct net_device *dev, int new_mtu)
@@ -875,6 +1010,115 @@ err_out:
return err;
}
+static LIST_HEAD(vnet_list);
+static DEFINE_MUTEX(vnet_list_mutex);
+
+static struct vnet * __devinit vnet_new(const u64 *local_mac)
+{
+ struct net_device *dev;
+ struct vnet *vp;
+ int err, i;
+
+ dev = alloc_etherdev(sizeof(*vp));
+ if (!dev) {
+ printk(KERN_ERR PFX "Etherdev alloc failed, aborting.\n");
+ return ERR_PTR(-ENOMEM);
+ }
+
+ for (i = 0; i < ETH_ALEN; i++)
+ dev->dev_addr[i] = (*local_mac >> (5 - i) * 8) & 0xff;
+
+ memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
+
+ vp = netdev_priv(dev);
+
+ spin_lock_init(&vp->lock);
+ vp->dev = dev;
+
+ INIT_LIST_HEAD(&vp->port_list);
+ for (i = 0; i < VNET_PORT_HASH_SIZE; i++)
+ INIT_HLIST_HEAD(&vp->port_hash[i]);
+ INIT_LIST_HEAD(&vp->list);
+ vp->local_mac = *local_mac;
+
+ dev->open = vnet_open;
+ dev->stop = vnet_close;
+ dev->set_multicast_list = vnet_set_rx_mode;
+ dev->set_mac_address = vnet_set_mac_addr;
+ dev->tx_timeout = vnet_tx_timeout;
+ dev->ethtool_ops = &vnet_ethtool_ops;
+ dev->watchdog_timeo = VNET_TX_TIMEOUT;
+ dev->change_mtu = vnet_change_mtu;
+ dev->hard_start_xmit = vnet_start_xmit;
+
+ err = register_netdev(dev);
+ if (err) {
+ printk(KERN_ERR PFX "Cannot register net device, "
+ "aborting.\n");
+ goto err_out_free_dev;
+ }
+
+ printk(KERN_INFO "%s: Sun LDOM vnet ", dev->name);
+
+ for (i = 0; i < 6; i++)
+ printk("%2.2x%c", dev->dev_addr[i], i == 5 ? '\n' : ':');
+
+ list_add(&vp->list, &vnet_list);
+
+ return vp;
+
+err_out_free_dev:
+ free_netdev(dev);
+
+ return ERR_PTR(err);
+}
+
+static struct vnet * __devinit vnet_find_or_create(const u64 *local_mac)
+{
+ struct vnet *iter, *vp;
+
+ mutex_lock(&vnet_list_mutex);
+ vp = NULL;
+ list_for_each_entry(iter, &vnet_list, list) {
+ if (iter->local_mac == *local_mac) {
+ vp = iter;
+ break;
+ }
+ }
+ if (!vp)
+ vp = vnet_new(local_mac);
+ mutex_unlock(&vnet_list_mutex);
+
+ return vp;
+}
+
+static const char *local_mac_prop = "local-mac-address";
+
+static struct vnet * __devinit vnet_find_parent(struct mdesc_handle *hp,
+ u64 port_node)
+{
+ const u64 *local_mac = NULL;
+ u64 a;
+
+ mdesc_for_each_arc(a, hp, port_node, MDESC_ARC_TYPE_BACK) {
+ u64 target = mdesc_arc_target(hp, a);
+ const char *name;
+
+ name = mdesc_get_property(hp, target, "name", NULL);
+ if (!name || strcmp(name, "network"))
+ continue;
+
+ local_mac = mdesc_get_property(hp, target,
+ local_mac_prop, NULL);
+ if (local_mac)
+ break;
+ }
+ if (!local_mac)
+ return ERR_PTR(-ENODEV);
+
+ return vnet_find_or_create(local_mac);
+}
+
static struct ldc_channel_config vnet_ldc_cfg = {
.event = vnet_event,
.mtu = 64,
@@ -887,6 +1131,14 @@ static struct vio_driver_ops vnet_vio_ops = {
.handshake_complete = vnet_handshake_complete,
};
+static void print_version(void)
+{
+ static int version_printed;
+
+ if (version_printed++ == 0)
+ printk(KERN_INFO "%s", version);
+}
+
const char *remote_macaddr_prop = "remote-mac-address";
static int __devinit vnet_port_probe(struct vio_dev *vdev,
@@ -899,14 +1151,17 @@ static int __devinit vnet_port_probe(struct vio_dev *vdev,
const u64 *rmac;
int len, i, err, switch_port;
- vp = dev_get_drvdata(vdev->dev.parent);
- if (!vp) {
- printk(KERN_ERR PFX "Cannot find port parent vnet.\n");
- return -ENODEV;
- }
+ print_version();
hp = mdesc_grab();
+ vp = vnet_find_parent(hp, vdev->mp);
+ if (IS_ERR(vp)) {
+ printk(KERN_ERR PFX "Cannot find port parent vnet.\n");
+ err = PTR_ERR(vp);
+ goto err_out_put_mdesc;
+ }
+
rmac = mdesc_get_property(hp, vdev->mp, remote_macaddr_prop, &len);
err = -ENODEV;
if (!rmac) {
@@ -947,6 +1202,7 @@ static int __devinit vnet_port_probe(struct vio_dev *vdev,
switch_port = 0;
if (mdesc_get_property(hp, vdev->mp, "switch-port", NULL) != NULL)
switch_port = 1;
+ port->switch_port = switch_port;
spin_lock_irqsave(&vp->lock, flags);
if (switch_port)
@@ -1013,7 +1269,7 @@ static struct vio_device_id vnet_port_match[] = {
},
{},
};
-MODULE_DEVICE_TABLE(vio, vnet_match);
+MODULE_DEVICE_TABLE(vio, vnet_port_match);
static struct vio_driver vnet_port_driver = {
.id_table = vnet_port_match,
@@ -1025,139 +1281,14 @@ static struct vio_driver vnet_port_driver = {
}
};
-const char *local_mac_prop = "local-mac-address";
-
-static int __devinit vnet_probe(struct vio_dev *vdev,
- const struct vio_device_id *id)
-{
- static int vnet_version_printed;
- struct mdesc_handle *hp;
- struct net_device *dev;
- struct vnet *vp;
- const u64 *mac;
- int err, i, len;
-
- if (vnet_version_printed++ == 0)
- printk(KERN_INFO "%s", version);
-
- hp = mdesc_grab();
-
- mac = mdesc_get_property(hp, vdev->mp, local_mac_prop, &len);
- if (!mac) {
- printk(KERN_ERR PFX "vnet lacks %s property.\n",
- local_mac_prop);
- err = -ENODEV;
- goto err_out;
- }
-
- dev = alloc_etherdev(sizeof(*vp));
- if (!dev) {
- printk(KERN_ERR PFX "Etherdev alloc failed, aborting.\n");
- err = -ENOMEM;
- goto err_out;
- }
-
- for (i = 0; i < ETH_ALEN; i++)
- dev->dev_addr[i] = (*mac >> (5 - i) * 8) & 0xff;
-
- memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
-
- SET_NETDEV_DEV(dev, &vdev->dev);
-
- vp = netdev_priv(dev);
-
- spin_lock_init(&vp->lock);
- vp->dev = dev;
- vp->vdev = vdev;
-
- INIT_LIST_HEAD(&vp->port_list);
- for (i = 0; i < VNET_PORT_HASH_SIZE; i++)
- INIT_HLIST_HEAD(&vp->port_hash[i]);
-
- dev->open = vnet_open;
- dev->stop = vnet_close;
- dev->set_multicast_list = vnet_set_rx_mode;
- dev->set_mac_address = vnet_set_mac_addr;
- dev->tx_timeout = vnet_tx_timeout;
- dev->ethtool_ops = &vnet_ethtool_ops;
- dev->watchdog_timeo = VNET_TX_TIMEOUT;
- dev->change_mtu = vnet_change_mtu;
- dev->hard_start_xmit = vnet_start_xmit;
-
- err = register_netdev(dev);
- if (err) {
- printk(KERN_ERR PFX "Cannot register net device, "
- "aborting.\n");
- goto err_out_free_dev;
- }
-
- printk(KERN_INFO "%s: Sun LDOM vnet ", dev->name);
-
- for (i = 0; i < 6; i++)
- printk("%2.2x%c", dev->dev_addr[i], i == 5 ? '\n' : ':');
-
- dev_set_drvdata(&vdev->dev, vp);
-
- mdesc_release(hp);
-
- return 0;
-
-err_out_free_dev:
- free_netdev(dev);
-
-err_out:
- mdesc_release(hp);
- return err;
-}
-
-static int vnet_remove(struct vio_dev *vdev)
-{
-
- struct vnet *vp = dev_get_drvdata(&vdev->dev);
-
- if (vp) {
- /* XXX unregister port, or at least check XXX */
- unregister_netdevice(vp->dev);
- dev_set_drvdata(&vdev->dev, NULL);
- }
- return 0;
-}
-
-static struct vio_device_id vnet_match[] = {
- {
- .type = "network",
- },
- {},
-};
-MODULE_DEVICE_TABLE(vio, vnet_match);
-
-static struct vio_driver vnet_driver = {
- .id_table = vnet_match,
- .probe = vnet_probe,
- .remove = vnet_remove,
- .driver = {
- .name = "vnet",
- .owner = THIS_MODULE,
- }
-};
-
static int __init vnet_init(void)
{
- int err = vio_register_driver(&vnet_driver);
-
- if (!err) {
- err = vio_register_driver(&vnet_port_driver);
- if (err)
- vio_unregister_driver(&vnet_driver);
- }
-
- return err;
+ return vio_register_driver(&vnet_port_driver);
}
static void __exit vnet_exit(void)
{
vio_unregister_driver(&vnet_port_driver);
- vio_unregister_driver(&vnet_driver);
}
module_init(vnet_init);
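
__send_mc_list() in the sunvnet changes above batches pending addresses into fixed-size VNET_MCAST_INFO control messages: it accumulates up to VNET_NUM_MCAST entries, sends a full message, resets the counter, and flushes any partial remainder at the end. The batching loop in miniature, with a stand-in send_msg() in place of vio_ldc_send() and an assumed batch size:

#include <stdio.h>

#define BATCH 7 /* stand-in for VNET_NUM_MCAST */

static void send_msg(int count) /* stand-in for vio_ldc_send() */
{
	printf("sending control message with %d address(es)\n", count);
}

int main(void)
{
	int total = 17, n_addrs = 0, i;

	for (i = 0; i < total; i++) {
		/* copy address i into the message buffer here */
		if (++n_addrs == BATCH) {
			send_msg(n_addrs); /* full batch: 7, then 7... */
			n_addrs = 0;
		}
	}
	if (n_addrs)
		send_msg(n_addrs);         /* ...then the final 3 */
	return 0;
}
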
diff --git a/drivers/net/sunvnet.h b/drivers/net/sunvnet.h
index 1c887302d46..d347a5bf24b 100644
--- a/drivers/net/sunvnet.h
+++ b/drivers/net/sunvnet.h
@@ -30,6 +30,8 @@ struct vnet_port {
struct hlist_node hash;
u8 raddr[ETH_ALEN];
+ u8 switch_port;
+ u8 __pad;
struct vnet *vp;
@@ -53,6 +55,13 @@ static inline unsigned int vnet_hashfn(u8 *mac)
return val & (VNET_PORT_HASH_MASK);
}
+struct vnet_mcast_entry {
+ u8 addr[ETH_ALEN];
+ u8 sent;
+ u8 hit;
+ struct vnet_mcast_entry *next;
+};
+
struct vnet {
/* Protects port_list and port_hash. */
spinlock_t lock;
@@ -60,11 +69,15 @@ struct vnet {
struct net_device *dev;
u32 msg_enable;
- struct vio_dev *vdev;
struct list_head port_list;
struct hlist_head port_hash[VNET_PORT_HASH_SIZE];
+
+ struct vnet_mcast_entry *mcast_list;
+
+ struct list_head list;
+ u64 local_mac;
};
#endif /* _SUNVNET_H */
diff --git a/drivers/net/tc35815.c b/drivers/net/tc35815.c
index 75655add3f3..7f94ca93098 100644
--- a/drivers/net/tc35815.c
+++ b/drivers/net/tc35815.c
@@ -626,7 +626,7 @@ static int __devinit tc35815_read_plat_dev_addr(struct net_device *dev)
return -ENODEV;
}
#else
-static int __devinit tc35815_read_plat_dev_addr(struct device *dev)
+static int __devinit tc35815_read_plat_dev_addr(struct net_device *dev)
{
return -ENODEV;
}
diff --git a/drivers/net/tg3.c b/drivers/net/tg3.c
index 5ee14764fd7..887b9a5cfe4 100644
--- a/drivers/net/tg3.c
+++ b/drivers/net/tg3.c
@@ -64,8 +64,8 @@
#define DRV_MODULE_NAME "tg3"
#define PFX DRV_MODULE_NAME ": "
-#define DRV_MODULE_VERSION "3.78"
-#define DRV_MODULE_RELDATE "July 11, 2007"
+#define DRV_MODULE_VERSION "3.79"
+#define DRV_MODULE_RELDATE "July 18, 2007"
#define TG3_DEF_MAC_MODE 0
#define TG3_DEF_RX_MODE 0
@@ -4847,6 +4847,59 @@ static int tg3_poll_fw(struct tg3 *tp)
return 0;
}
+/* Save PCI command register before chip reset */
+static void tg3_save_pci_state(struct tg3 *tp)
+{
+ u32 val;
+
+ pci_read_config_dword(tp->pdev, TG3PCI_COMMAND, &val);
+ tp->pci_cmd = val;
+}
+
+/* Restore PCI state after chip reset */
+static void tg3_restore_pci_state(struct tg3 *tp)
+{
+ u32 val;
+
+ /* Re-enable indirect register accesses. */
+ pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
+ tp->misc_host_ctrl);
+
+ /* Set MAX PCI retry to zero. */
+ val = (PCISTATE_ROM_ENABLE | PCISTATE_ROM_RETRY_ENABLE);
+ if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
+ (tp->tg3_flags & TG3_FLAG_PCIX_MODE))
+ val |= PCISTATE_RETRY_SAME_DMA;
+ pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, val);
+
+ pci_write_config_dword(tp->pdev, TG3PCI_COMMAND, tp->pci_cmd);
+
+ /* Make sure PCI-X relaxed ordering bit is clear. */
+ pci_read_config_dword(tp->pdev, TG3PCI_X_CAPS, &val);
+ val &= ~PCIX_CAPS_RELAXED_ORDERING;
+ pci_write_config_dword(tp->pdev, TG3PCI_X_CAPS, val);
+
+ if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS) {
+ u32 val;
+
+ /* Chip reset on 5780 will reset MSI enable bit,
+ * so need to restore it.
+ */
+ if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
+ u16 ctrl;
+
+ pci_read_config_word(tp->pdev,
+ tp->msi_cap + PCI_MSI_FLAGS,
+ &ctrl);
+ pci_write_config_word(tp->pdev,
+ tp->msi_cap + PCI_MSI_FLAGS,
+ ctrl | PCI_MSI_FLAGS_ENABLE);
+ val = tr32(MSGINT_MODE);
+ tw32(MSGINT_MODE, val | MSGINT_MODE_ENABLE);
+ }
+ }
+}
+
static void tg3_stop_fw(struct tg3 *);
/* tp->lock is held. */
@@ -4863,6 +4916,12 @@ static int tg3_chip_reset(struct tg3 *tp)
*/
tp->nvram_lock_cnt = 0;
+ /* GRC_MISC_CFG core clock reset will clear the memory
+ * enable bit in PCI register 4 and the MSI enable bit
+ * on some chips, so we save relevant registers here.
+ */
+ tg3_save_pci_state(tp);
+
if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787)
@@ -4961,50 +5020,14 @@ static int tg3_chip_reset(struct tg3 *tp)
pci_write_config_dword(tp->pdev, 0xd8, 0xf5000);
}
- /* Re-enable indirect register accesses. */
- pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
- tp->misc_host_ctrl);
-
- /* Set MAX PCI retry to zero. */
- val = (PCISTATE_ROM_ENABLE | PCISTATE_ROM_RETRY_ENABLE);
- if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
- (tp->tg3_flags & TG3_FLAG_PCIX_MODE))
- val |= PCISTATE_RETRY_SAME_DMA;
- pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, val);
-
- pci_restore_state(tp->pdev);
+ tg3_restore_pci_state(tp);
tp->tg3_flags &= ~TG3_FLAG_CHIP_RESETTING;
- /* Make sure PCI-X relaxed ordering bit is clear. */
- pci_read_config_dword(tp->pdev, TG3PCI_X_CAPS, &val);
- val &= ~PCIX_CAPS_RELAXED_ORDERING;
- pci_write_config_dword(tp->pdev, TG3PCI_X_CAPS, val);
-
- if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS) {
- u32 val;
-
- /* Chip reset on 5780 will reset MSI enable bit,
- * so need to restore it.
- */
- if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
- u16 ctrl;
-
- pci_read_config_word(tp->pdev,
- tp->msi_cap + PCI_MSI_FLAGS,
- &ctrl);
- pci_write_config_word(tp->pdev,
- tp->msi_cap + PCI_MSI_FLAGS,
- ctrl | PCI_MSI_FLAGS_ENABLE);
- val = tr32(MSGINT_MODE);
- tw32(MSGINT_MODE, val | MSGINT_MODE_ENABLE);
- }
-
+ val = 0;
+ if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)
val = tr32(MEMARB_MODE);
- tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
-
- } else
- tw32(MEMARB_MODE, MEMARB_MODE_ENABLE);
+ tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A3) {
tg3_stop_fw(tp);
@@ -11978,7 +12001,6 @@ static int __devinit tg3_init_one(struct pci_dev *pdev,
*/
if ((tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE) ||
(tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
- pci_save_state(tp->pdev);
tw32(MEMARB_MODE, MEMARB_MODE_ENABLE);
tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
}
@@ -12007,12 +12029,6 @@ static int __devinit tg3_init_one(struct pci_dev *pdev,
tg3_init_coal(tp);
- /* Now that we have fully setup the chip, save away a snapshot
- * of the PCI config space. We need to restore this after
- * GRC_MISC_CFG core clock resets and some resume events.
- */
- pci_save_state(tp->pdev);
-
pci_set_drvdata(pdev, dev);
err = register_netdev(dev);
diff --git a/drivers/net/tg3.h b/drivers/net/tg3.h
index d84e75e7365..5c21f49026c 100644
--- a/drivers/net/tg3.h
+++ b/drivers/net/tg3.h
@@ -2345,6 +2345,7 @@ struct tg3 {
#define PHY_REV_BCM5411_X0 0x1 /* Found on Netgear GA302T */
u32 led_ctrl;
+ u32 pci_cmd;
char board_part_number[24];
char fw_ver[16];
diff --git a/drivers/net/wan/c101.c b/drivers/net/wan/c101.c
index 6b63b350cd5..8ead774d14c 100644
--- a/drivers/net/wan/c101.c
+++ b/drivers/net/wan/c101.c
@@ -315,12 +315,11 @@ static int __init c101_run(unsigned long irq, unsigned long winbase)
return -ENODEV;
}
- card = kmalloc(sizeof(card_t), GFP_KERNEL);
+ card = kzalloc(sizeof(card_t), GFP_KERNEL);
if (card == NULL) {
printk(KERN_ERR "c101: unable to allocate memory\n");
return -ENOBUFS;
}
- memset(card, 0, sizeof(card_t));
card->dev = alloc_hdlcdev(card);
if (!card->dev) {
diff --git a/drivers/net/wan/cosa.c b/drivers/net/wan/cosa.c
index 9ef49ce148b..26058b4f8f3 100644
--- a/drivers/net/wan/cosa.c
+++ b/drivers/net/wan/cosa.c
@@ -572,13 +572,11 @@ static int cosa_probe(int base, int irq, int dma)
sprintf(cosa->name, "cosa%d", cosa->num);
/* Initialize the per-channel data */
- cosa->chan = kmalloc(sizeof(struct channel_data)*cosa->nchannels,
- GFP_KERNEL);
+ cosa->chan = kcalloc(cosa->nchannels, sizeof(struct channel_data), GFP_KERNEL);
if (!cosa->chan) {
err = -ENOMEM;
goto err_out3;
}
- memset(cosa->chan, 0, sizeof(struct channel_data)*cosa->nchannels);
for (i=0; i<cosa->nchannels; i++) {
cosa->chan[i].cosa = cosa;
cosa->chan[i].num = i;
diff --git a/drivers/net/wan/cycx_main.c b/drivers/net/wan/cycx_main.c
index 6e5f1c89851..a0e8611ad8e 100644
--- a/drivers/net/wan/cycx_main.c
+++ b/drivers/net/wan/cycx_main.c
@@ -113,12 +113,10 @@ static int __init cycx_init(void)
/* Verify number of cards and allocate adapter data space */
cycx_ncards = min_t(int, cycx_ncards, CYCX_MAX_CARDS);
cycx_ncards = max_t(int, cycx_ncards, 1);
- cycx_card_array = kmalloc(sizeof(struct cycx_device) * cycx_ncards,
- GFP_KERNEL);
+ cycx_card_array = kcalloc(cycx_ncards, sizeof(struct cycx_device), GFP_KERNEL);
if (!cycx_card_array)
goto out;
- memset(cycx_card_array, 0, sizeof(struct cycx_device) * cycx_ncards);
/* Register adapters with WAN router */
for (cnt = 0; cnt < cycx_ncards; ++cnt) {
diff --git a/drivers/net/wan/cycx_x25.c b/drivers/net/wan/cycx_x25.c
index 016b3ff3ea5..a8af28b273d 100644
--- a/drivers/net/wan/cycx_x25.c
+++ b/drivers/net/wan/cycx_x25.c
@@ -376,11 +376,10 @@ static int cycx_wan_new_if(struct wan_device *wandev, struct net_device *dev,
}
/* allocate and initialize private data */
- chan = kmalloc(sizeof(struct cycx_x25_channel), GFP_KERNEL);
+ chan = kzalloc(sizeof(struct cycx_x25_channel), GFP_KERNEL);
if (!chan)
return -ENOMEM;
- memset(chan, 0, sizeof(*chan));
strcpy(chan->name, conf->name);
chan->card = card;
chan->link = conf->port;
diff --git a/drivers/net/wan/dscc4.c b/drivers/net/wan/dscc4.c
index dca02447145..50d2f9108dc 100644
--- a/drivers/net/wan/dscc4.c
+++ b/drivers/net/wan/dscc4.c
@@ -890,12 +890,11 @@ static int dscc4_found1(struct pci_dev *pdev, void __iomem *ioaddr)
struct dscc4_dev_priv *root;
int i, ret = -ENOMEM;
- root = kmalloc(dev_per_card*sizeof(*root), GFP_KERNEL);
+ root = kcalloc(dev_per_card, sizeof(*root), GFP_KERNEL);
if (!root) {
printk(KERN_ERR "%s: can't allocate data\n", DRV_NAME);
goto err_out;
}
- memset(root, 0, dev_per_card*sizeof(*root));
for (i = 0; i < dev_per_card; i++) {
root[i].dev = alloc_hdlcdev(root + i);
@@ -903,12 +902,11 @@ static int dscc4_found1(struct pci_dev *pdev, void __iomem *ioaddr)
goto err_free_dev;
}
- ppriv = kmalloc(sizeof(*ppriv), GFP_KERNEL);
+ ppriv = kzalloc(sizeof(*ppriv), GFP_KERNEL);
if (!ppriv) {
printk(KERN_ERR "%s: can't allocate private data\n", DRV_NAME);
goto err_free_dev;
}
- memset(ppriv, 0, sizeof(struct dscc4_pci_priv));
ppriv->root = root;
spin_lock_init(&ppriv->lock);
diff --git a/drivers/net/wan/farsync.c b/drivers/net/wan/farsync.c
index 58a53b6d9b4..12dae8e2484 100644
--- a/drivers/net/wan/farsync.c
+++ b/drivers/net/wan/farsync.c
@@ -2476,13 +2476,12 @@ fst_add_one(struct pci_dev *pdev, const struct pci_device_id *ent)
}
/* Allocate driver private data */
- card = kmalloc(sizeof (struct fst_card_info), GFP_KERNEL);
+ card = kzalloc(sizeof (struct fst_card_info), GFP_KERNEL);
if (card == NULL) {
printk_err("FarSync card found but insufficient memory for"
" driver storage\n");
return -ENOMEM;
}
- memset(card, 0, sizeof (struct fst_card_info));
/* Try to enable the device */
if ((err = pci_enable_device(pdev)) != 0) {
diff --git a/drivers/net/wan/hostess_sv11.c b/drivers/net/wan/hostess_sv11.c
index 9ba3e4ee6ec..bf5f8d9b5c8 100644
--- a/drivers/net/wan/hostess_sv11.c
+++ b/drivers/net/wan/hostess_sv11.c
@@ -231,11 +231,10 @@ static struct sv11_device *sv11_init(int iobase, int irq)
return NULL;
}
- sv = kmalloc(sizeof(struct sv11_device), GFP_KERNEL);
+ sv = kzalloc(sizeof(struct sv11_device), GFP_KERNEL);
if(!sv)
goto fail3;
- memset(sv, 0, sizeof(*sv));
sv->if_ptr=&sv->netdev;
sv->netdev.dev = alloc_netdev(0, "hdlc%d", sv11_setup);
diff --git a/drivers/net/wan/n2.c b/drivers/net/wan/n2.c
index 5c322dfb79f..cbdf0b748bd 100644
--- a/drivers/net/wan/n2.c
+++ b/drivers/net/wan/n2.c
@@ -351,12 +351,11 @@ static int __init n2_run(unsigned long io, unsigned long irq,
return -ENODEV;
}
- card = kmalloc(sizeof(card_t), GFP_KERNEL);
+ card = kzalloc(sizeof(card_t), GFP_KERNEL);
if (card == NULL) {
printk(KERN_ERR "n2: unable to allocate memory\n");
return -ENOBUFS;
}
- memset(card, 0, sizeof(card_t));
card->ports[0].dev = alloc_hdlcdev(&card->ports[0]);
card->ports[1].dev = alloc_hdlcdev(&card->ports[1]);
diff --git a/drivers/net/wan/pc300_drv.c b/drivers/net/wan/pc300_drv.c
index 5d8c78ee2cd..99fee2f1d01 100644
--- a/drivers/net/wan/pc300_drv.c
+++ b/drivers/net/wan/pc300_drv.c
@@ -3456,7 +3456,7 @@ cpc_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
if ((err = pci_enable_device(pdev)) < 0)
return err;
- card = kmalloc(sizeof(pc300_t), GFP_KERNEL);
+ card = kzalloc(sizeof(pc300_t), GFP_KERNEL);
if (card == NULL) {
printk("PC300 found at RAM 0x%016llx, "
"but could not allocate card structure.\n",
@@ -3464,7 +3464,6 @@ cpc_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
err = -ENOMEM;
goto err_disable_dev;
}
- memset(card, 0, sizeof(pc300_t));
err = -ENODEV;
diff --git a/drivers/net/wan/pc300too.c b/drivers/net/wan/pc300too.c
index dfbd3b00f03..6353cb5c658 100644
--- a/drivers/net/wan/pc300too.c
+++ b/drivers/net/wan/pc300too.c
@@ -334,14 +334,13 @@ static int __devinit pc300_pci_init_one(struct pci_dev *pdev,
return i;
}
- card = kmalloc(sizeof(card_t), GFP_KERNEL);
+ card = kzalloc(sizeof(card_t), GFP_KERNEL);
if (card == NULL) {
printk(KERN_ERR "pc300: unable to allocate memory\n");
pci_release_regions(pdev);
pci_disable_device(pdev);
return -ENOBUFS;
}
- memset(card, 0, sizeof(card_t));
pci_set_drvdata(pdev, card);
if (pdev->device == PCI_DEVICE_ID_PC300_TE_1 ||
diff --git a/drivers/net/wan/pci200syn.c b/drivers/net/wan/pci200syn.c
index 7f720de2e9f..092e51d8903 100644
--- a/drivers/net/wan/pci200syn.c
+++ b/drivers/net/wan/pci200syn.c
@@ -312,14 +312,13 @@ static int __devinit pci200_pci_init_one(struct pci_dev *pdev,
return i;
}
- card = kmalloc(sizeof(card_t), GFP_KERNEL);
+ card = kzalloc(sizeof(card_t), GFP_KERNEL);
if (card == NULL) {
printk(KERN_ERR "pci200syn: unable to allocate memory\n");
pci_release_regions(pdev);
pci_disable_device(pdev);
return -ENOBUFS;
}
- memset(card, 0, sizeof(card_t));
pci_set_drvdata(pdev, card);
card->ports[0].dev = alloc_hdlcdev(&card->ports[0]);
card->ports[1].dev = alloc_hdlcdev(&card->ports[1]);
diff --git a/drivers/net/wan/sdla.c b/drivers/net/wan/sdla.c
index 6a485f0556f..792e588d7d6 100644
--- a/drivers/net/wan/sdla.c
+++ b/drivers/net/wan/sdla.c
@@ -1196,10 +1196,9 @@ static int sdla_xfer(struct net_device *dev, struct sdla_mem __user *info, int r
if (read)
{
- temp = kmalloc(mem.len, GFP_KERNEL);
+ temp = kzalloc(mem.len, GFP_KERNEL);
if (!temp)
return(-ENOMEM);
- memset(temp, 0, mem.len);
sdla_read(dev, mem.addr, temp, mem.len);
if(copy_to_user(mem.data, temp, mem.len))
{
diff --git a/drivers/net/wan/sealevel.c b/drivers/net/wan/sealevel.c
index 131358108c5..11276bf3149 100644
--- a/drivers/net/wan/sealevel.c
+++ b/drivers/net/wan/sealevel.c
@@ -270,11 +270,10 @@ static __init struct slvl_board *slvl_init(int iobase, int irq,
return NULL;
}
- b = kmalloc(sizeof(struct slvl_board), GFP_KERNEL);
+ b = kzalloc(sizeof(struct slvl_board), GFP_KERNEL);
if(!b)
goto fail3;
- memset(b, 0, sizeof(*b));
if (!(b->dev[0]= slvl_alloc(iobase, irq)))
goto fail2;
diff --git a/drivers/net/wan/wanxl.c b/drivers/net/wan/wanxl.c
index c7360157433..3c78f985638 100644
--- a/drivers/net/wan/wanxl.c
+++ b/drivers/net/wan/wanxl.c
@@ -599,7 +599,7 @@ static int __devinit wanxl_pci_init_one(struct pci_dev *pdev,
}
alloc_size = sizeof(card_t) + ports * sizeof(port_t);
- card = kmalloc(alloc_size, GFP_KERNEL);
+ card = kzalloc(alloc_size, GFP_KERNEL);
if (card == NULL) {
printk(KERN_ERR "wanXL %s: unable to allocate memory\n",
pci_name(pdev));
@@ -607,7 +607,6 @@ static int __devinit wanxl_pci_init_one(struct pci_dev *pdev,
pci_disable_device(pdev);
return -ENOBUFS;
}
- memset(card, 0, alloc_size);
pci_set_drvdata(pdev, card);
card->pdev = pdev;
diff --git a/drivers/net/wan/x25_asy.c b/drivers/net/wan/x25_asy.c
index 1c9edd97acc..c48b1cc63fd 100644
--- a/drivers/net/wan/x25_asy.c
+++ b/drivers/net/wan/x25_asy.c
@@ -786,14 +786,12 @@ static int __init init_x25_asy(void)
printk(KERN_INFO "X.25 async: version 0.00 ALPHA "
"(dynamic channels, max=%d).\n", x25_asy_maxdev );
- x25_asy_devs = kmalloc(sizeof(struct net_device *)*x25_asy_maxdev,
- GFP_KERNEL);
+ x25_asy_devs = kcalloc(x25_asy_maxdev, sizeof(struct net_device*), GFP_KERNEL);
if (!x25_asy_devs) {
printk(KERN_WARNING "X25 async: Can't allocate x25_asy_ctrls[] "
"array! Uaargh! (-> No X.25 available)\n");
return -ENOMEM;
}
- memset(x25_asy_devs, 0, sizeof(struct net_device *)*x25_asy_maxdev);
return tty_register_ldisc(N_X25, &x25_ldisc);
}
diff --git a/drivers/net/wireless/ipw2100.c b/drivers/net/wireless/ipw2100.c
index 072ede71e57..8990585bd22 100644
--- a/drivers/net/wireless/ipw2100.c
+++ b/drivers/net/wireless/ipw2100.c
@@ -7868,10 +7868,10 @@ static int ipw2100_wx_set_powermode(struct net_device *dev,
goto done;
}
- if ((mode < 1) || (mode > POWER_MODES))
+ if ((mode < 0) || (mode > POWER_MODES))
mode = IPW_POWER_AUTO;
- if (priv->power_mode != mode)
+ if (IPW_POWER_LEVEL(priv->power_mode) != mode)
err = ipw2100_set_power_mode(priv, mode);
done:
mutex_unlock(&priv->action_mutex);
@@ -7902,7 +7902,7 @@ static int ipw2100_wx_get_powermode(struct net_device *dev,
break;
case IPW_POWER_AUTO:
snprintf(extra, MAX_POWER_STRING,
- "Power save level: %d (Auto)", 0);
+ "Power save level: %d (Auto)", level);
break;
default:
timeout = timeout_duration[level - 1] / 1000;
diff --git a/drivers/net/wireless/ipw2200.c b/drivers/net/wireless/ipw2200.c
index aa32a97380e..61497c46746 100644
--- a/drivers/net/wireless/ipw2200.c
+++ b/drivers/net/wireless/ipw2200.c
@@ -70,7 +70,7 @@
#define VQ
#endif
-#define IPW2200_VERSION "1.2.0" VK VD VM VP VR VQ
+#define IPW2200_VERSION "1.2.2" VK VD VM VP VR VQ
#define DRV_DESCRIPTION "Intel(R) PRO/Wireless 2200/2915 Network Driver"
#define DRV_COPYRIGHT "Copyright(c) 2003-2006 Intel Corporation"
#define DRV_VERSION IPW2200_VERSION
@@ -2506,7 +2506,7 @@ static int ipw_send_power_mode(struct ipw_priv *priv, u32 mode)
break;
}
- param = cpu_to_le32(mode);
+ param = cpu_to_le32(param);
return ipw_send_cmd_pdu(priv, IPW_CMD_POWER_MODE, sizeof(param),
&param);
}
@@ -9568,6 +9568,7 @@ static int ipw_wx_set_power(struct net_device *dev,
priv->power_mode = IPW_POWER_ENABLED | IPW_POWER_BATTERY;
else
priv->power_mode = IPW_POWER_ENABLED | priv->power_mode;
+
err = ipw_send_power_mode(priv, IPW_POWER_LEVEL(priv->power_mode));
if (err) {
IPW_DEBUG_WX("failed setting power mode.\n");
@@ -9604,22 +9605,19 @@ static int ipw_wx_set_powermode(struct net_device *dev,
struct ipw_priv *priv = ieee80211_priv(dev);
int mode = *(int *)extra;
int err;
+
mutex_lock(&priv->mutex);
- if ((mode < 1) || (mode > IPW_POWER_LIMIT)) {
+ if ((mode < 1) || (mode > IPW_POWER_LIMIT))
mode = IPW_POWER_AC;
- priv->power_mode = mode;
- } else {
- priv->power_mode = IPW_POWER_ENABLED | mode;
- }
- if (priv->power_mode != mode) {
+ if (IPW_POWER_LEVEL(priv->power_mode) != mode) {
err = ipw_send_power_mode(priv, mode);
-
if (err) {
IPW_DEBUG_WX("failed setting power mode.\n");
mutex_unlock(&priv->mutex);
return err;
}
+ priv->power_mode = IPW_POWER_ENABLED | mode;
}
mutex_unlock(&priv->mutex);
return 0;
@@ -10555,7 +10553,7 @@ static irqreturn_t ipw_isr(int irq, void *data)
spin_lock(&priv->irq_lock);
if (!(priv->status & STATUS_INT_ENABLED)) {
- /* Shared IRQ */
+ /* IRQ is disabled */
goto none;
}
diff --git a/drivers/net/wireless/zd1211rw/zd_usb.c b/drivers/net/wireless/zd1211rw/zd_usb.c
index 28d41a29d7b..a9c339ef116 100644
--- a/drivers/net/wireless/zd1211rw/zd_usb.c
+++ b/drivers/net/wireless/zd1211rw/zd_usb.c
@@ -72,6 +72,8 @@ static struct usb_device_id usb_ids[] = {
{ USB_DEVICE(0x0586, 0x3413), .driver_info = DEVICE_ZD1211B },
{ USB_DEVICE(0x0053, 0x5301), .driver_info = DEVICE_ZD1211B },
{ USB_DEVICE(0x0411, 0x00da), .driver_info = DEVICE_ZD1211B },
+ { USB_DEVICE(0x2019, 0x5303), .driver_info = DEVICE_ZD1211B },
+ { USB_DEVICE(0x129b, 0x1667), .driver_info = DEVICE_ZD1211B },
/* "Driverless" devices that need ejecting */
{ USB_DEVICE(0x0ace, 0x2011), .driver_info = DEVICE_INSTALLER },
{ USB_DEVICE(0x0ace, 0x20ff), .driver_info = DEVICE_INSTALLER },
diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c
new file mode 100644
index 00000000000..489f69c5d6c
--- /dev/null
+++ b/drivers/net/xen-netfront.c
@@ -0,0 +1,1863 @@
+/*
+ * Virtual network driver for conversing with remote driver backends.
+ *
+ * Copyright (c) 2002-2005, K A Fraser
+ * Copyright (c) 2005, XenSource Ltd
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation; or, when distributed
+ * separately from the Linux kernel or incorporated into other
+ * software packages, subject to the following license:
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this source file (the "Software"), to deal in the Software without
+ * restriction, including without limitation the rights to use, copy, modify,
+ * merge, publish, distribute, sublicense, and/or sell copies of the Software,
+ * and to permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+#include <linux/ethtool.h>
+#include <linux/if_ether.h>
+#include <linux/tcp.h>
+#include <linux/udp.h>
+#include <linux/moduleparam.h>
+#include <linux/mm.h>
+#include <net/ip.h>
+
+#include <xen/xenbus.h>
+#include <xen/events.h>
+#include <xen/page.h>
+#include <xen/grant_table.h>
+
+#include <xen/interface/io/netif.h>
+#include <xen/interface/memory.h>
+#include <xen/interface/grant_table.h>
+
+static struct ethtool_ops xennet_ethtool_ops;
+
+struct netfront_cb {
+ struct page *page;
+ unsigned offset;
+};
+
+#define NETFRONT_SKB_CB(skb) ((struct netfront_cb *)((skb)->cb))
+
+#define RX_COPY_THRESHOLD 256
+
+#define GRANT_INVALID_REF 0
+
+#define NET_TX_RING_SIZE __RING_SIZE((struct xen_netif_tx_sring *)0, PAGE_SIZE)
+#define NET_RX_RING_SIZE __RING_SIZE((struct xen_netif_rx_sring *)0, PAGE_SIZE)
+#define TX_MAX_TARGET min_t(int, NET_RX_RING_SIZE, 256)
+
+struct netfront_info {
+ struct list_head list;
+ struct net_device *netdev;
+
+ struct net_device_stats stats;
+
+ struct xen_netif_tx_front_ring tx;
+ struct xen_netif_rx_front_ring rx;
+
+ spinlock_t tx_lock;
+ spinlock_t rx_lock;
+
+ unsigned int evtchn;
+
+ /* Receive-ring batched refills. */
+#define RX_MIN_TARGET 8
+#define RX_DFL_MIN_TARGET 64
+#define RX_MAX_TARGET min_t(int, NET_RX_RING_SIZE, 256)
+ unsigned rx_min_target, rx_max_target, rx_target;
+ struct sk_buff_head rx_batch;
+
+ struct timer_list rx_refill_timer;
+
+ /*
+ * {tx,rx}_skbs store outstanding skbuffs. Free tx_skb entries
+ * are linked from tx_skb_freelist through skb_entry.link.
+ *
+ * NB. Freelist index entries are always going to be less than
+	 * PAGE_OFFSET, whereas pointers to skbs will always be equal to or
+ * greater than PAGE_OFFSET: we use this property to distinguish
+ * them.
+ */
+ union skb_entry {
+ struct sk_buff *skb;
+ unsigned link;
+ } tx_skbs[NET_TX_RING_SIZE];
+ grant_ref_t gref_tx_head;
+ grant_ref_t grant_tx_ref[NET_TX_RING_SIZE];
+ unsigned tx_skb_freelist;
+
+ struct sk_buff *rx_skbs[NET_RX_RING_SIZE];
+ grant_ref_t gref_rx_head;
+ grant_ref_t grant_rx_ref[NET_RX_RING_SIZE];
+
+ struct xenbus_device *xbdev;
+ int tx_ring_ref;
+ int rx_ring_ref;
+
+ unsigned long rx_pfn_array[NET_RX_RING_SIZE];
+ struct multicall_entry rx_mcl[NET_RX_RING_SIZE+1];
+ struct mmu_update rx_mmu[NET_RX_RING_SIZE];
+};
+
+struct netfront_rx_info {
+ struct xen_netif_rx_response rx;
+ struct xen_netif_extra_info extras[XEN_NETIF_EXTRA_TYPE_MAX - 1];
+};
+
+/*
+ * Access helpers for acquiring and freeing slots in tx_skbs[].
+ */
+
+static void add_id_to_freelist(unsigned *head, union skb_entry *list,
+ unsigned short id)
+{
+ list[id].link = *head;
+ *head = id;
+}
+
+static unsigned short get_id_from_freelist(unsigned *head,
+ union skb_entry *list)
+{
+ unsigned int id = *head;
+ *head = list[id].link;
+ return id;
+}
+
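+/* NET_RX_RING_SIZE is a power of two; mask a ring index onto a slot. */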
+static int xennet_rxidx(RING_IDX idx)
+{
+ return idx & (NET_RX_RING_SIZE - 1);
+}
+
+static struct sk_buff *xennet_get_rx_skb(struct netfront_info *np,
+ RING_IDX ri)
+{
+ int i = xennet_rxidx(ri);
+ struct sk_buff *skb = np->rx_skbs[i];
+ np->rx_skbs[i] = NULL;
+ return skb;
+}
+
+static grant_ref_t xennet_get_rx_ref(struct netfront_info *np,
+ RING_IDX ri)
+{
+ int i = xennet_rxidx(ri);
+ grant_ref_t ref = np->grant_rx_ref[i];
+ np->grant_rx_ref[i] = GRANT_INVALID_REF;
+ return ref;
+}
+
+#ifdef CONFIG_SYSFS
+static int xennet_sysfs_addif(struct net_device *netdev);
+static void xennet_sysfs_delif(struct net_device *netdev);
+#else /* !CONFIG_SYSFS */
+#define xennet_sysfs_addif(dev) (0)
+#define xennet_sysfs_delif(dev) do { } while (0)
+#endif
+
+static int xennet_can_sg(struct net_device *dev)
+{
+ return dev->features & NETIF_F_SG;
+}
+
+static void rx_refill_timeout(unsigned long data)
+{
+ struct net_device *dev = (struct net_device *)data;
+ netif_rx_schedule(dev);
+}
+
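+/* Is there room on the tx ring for a maximally-fragmented skb, plus slack? */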
+static int netfront_tx_slot_available(struct netfront_info *np)
+{
+ return ((np->tx.req_prod_pvt - np->tx.rsp_cons) <
+ (TX_MAX_TARGET - MAX_SKB_FRAGS - 2));
+}
+
+static void xennet_maybe_wake_tx(struct net_device *dev)
+{
+ struct netfront_info *np = netdev_priv(dev);
+
+ if (unlikely(netif_queue_stopped(dev)) &&
+ netfront_tx_slot_available(np) &&
+ likely(netif_running(dev)))
+ netif_wake_queue(dev);
+}
+
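+/*
+ * Top up the rx ring: allocate skbs and pages in a batch, grant the
+ * backend access to each page, then publish the new requests.
+ */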
+static void xennet_alloc_rx_buffers(struct net_device *dev)
+{
+ unsigned short id;
+ struct netfront_info *np = netdev_priv(dev);
+ struct sk_buff *skb;
+ struct page *page;
+ int i, batch_target, notify;
+ RING_IDX req_prod = np->rx.req_prod_pvt;
+ struct xen_memory_reservation reservation;
+ grant_ref_t ref;
+ unsigned long pfn;
+ void *vaddr;
+ int nr_flips;
+ struct xen_netif_rx_request *req;
+
+ if (unlikely(!netif_carrier_ok(dev)))
+ return;
+
+ /*
+ * Allocate skbuffs greedily, even though we batch updates to the
+ * receive ring. This creates a less bursty demand on the memory
+ * allocator, so should reduce the chance of failed allocation requests
+	 * both for ourselves and for other kernel subsystems.
+ */
+ batch_target = np->rx_target - (req_prod - np->rx.rsp_cons);
+ for (i = skb_queue_len(&np->rx_batch); i < batch_target; i++) {
+ skb = __netdev_alloc_skb(dev, RX_COPY_THRESHOLD,
+ GFP_ATOMIC | __GFP_NOWARN);
+ if (unlikely(!skb))
+ goto no_skb;
+
+ page = alloc_page(GFP_ATOMIC | __GFP_NOWARN);
+ if (!page) {
+ kfree_skb(skb);
+no_skb:
+ /* Any skbuffs queued for refill? Force them out. */
+ if (i != 0)
+ goto refill;
+ /* Could not allocate any skbuffs. Try again later. */
+ mod_timer(&np->rx_refill_timer,
+ jiffies + (HZ/10));
+ break;
+ }
+
+ skb_shinfo(skb)->frags[0].page = page;
+ skb_shinfo(skb)->nr_frags = 1;
+ __skb_queue_tail(&np->rx_batch, skb);
+ }
+
+ /* Is the batch large enough to be worthwhile? */
+ if (i < (np->rx_target/2)) {
+ if (req_prod > np->rx.sring->req_prod)
+ goto push;
+ return;
+ }
+
+ /* Adjust our fill target if we risked running out of buffers. */
+ if (((req_prod - np->rx.sring->rsp_prod) < (np->rx_target / 4)) &&
+ ((np->rx_target *= 2) > np->rx_max_target))
+ np->rx_target = np->rx_max_target;
+
+ refill:
+ for (nr_flips = i = 0; ; i++) {
+ skb = __skb_dequeue(&np->rx_batch);
+ if (skb == NULL)
+ break;
+
+ skb->dev = dev;
+
+ id = xennet_rxidx(req_prod + i);
+
+ BUG_ON(np->rx_skbs[id]);
+ np->rx_skbs[id] = skb;
+
+ ref = gnttab_claim_grant_reference(&np->gref_rx_head);
+ BUG_ON((signed short)ref < 0);
+ np->grant_rx_ref[id] = ref;
+
+ pfn = page_to_pfn(skb_shinfo(skb)->frags[0].page);
+ vaddr = page_address(skb_shinfo(skb)->frags[0].page);
+
+ req = RING_GET_REQUEST(&np->rx, req_prod + i);
+ gnttab_grant_foreign_access_ref(ref,
+ np->xbdev->otherend_id,
+ pfn_to_mfn(pfn),
+ 0);
+
+ req->id = id;
+ req->gref = ref;
+ }
+
+ if (nr_flips != 0) {
+ reservation.extent_start = np->rx_pfn_array;
+ reservation.nr_extents = nr_flips;
+ reservation.extent_order = 0;
+ reservation.address_bits = 0;
+ reservation.domid = DOMID_SELF;
+
+ if (!xen_feature(XENFEAT_auto_translated_physmap)) {
+ /* After all PTEs have been zapped, flush the TLB. */
+ np->rx_mcl[i-1].args[MULTI_UVMFLAGS_INDEX] =
+ UVMF_TLB_FLUSH|UVMF_ALL;
+
+ /* Give away a batch of pages. */
+ np->rx_mcl[i].op = __HYPERVISOR_memory_op;
+ np->rx_mcl[i].args[0] = XENMEM_decrease_reservation;
+ np->rx_mcl[i].args[1] = (unsigned long)&reservation;
+
+ /* Zap PTEs and give away pages in one big
+ * multicall. */
+ (void)HYPERVISOR_multicall(np->rx_mcl, i+1);
+
+ /* Check return status of HYPERVISOR_memory_op(). */
+ if (unlikely(np->rx_mcl[i].result != i))
+ panic("Unable to reduce memory reservation\n");
+ } else {
+ if (HYPERVISOR_memory_op(XENMEM_decrease_reservation,
+ &reservation) != i)
+ panic("Unable to reduce memory reservation\n");
+ }
+ } else {
+		wmb();		/* barrier so backend sees requests */
+ }
+
+ /* Above is a suitable barrier to ensure backend will see requests. */
+ np->rx.req_prod_pvt = req_prod + i;
+ push:
+ RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&np->rx, notify);
+ if (notify)
+ notify_remote_via_irq(np->netdev->irq);
+}
+
+static int xennet_open(struct net_device *dev)
+{
+ struct netfront_info *np = netdev_priv(dev);
+
+ memset(&np->stats, 0, sizeof(np->stats));
+
+ spin_lock_bh(&np->rx_lock);
+ if (netif_carrier_ok(dev)) {
+ xennet_alloc_rx_buffers(dev);
+ np->rx.sring->rsp_event = np->rx.rsp_cons + 1;
+ if (RING_HAS_UNCONSUMED_RESPONSES(&np->rx))
+ netif_rx_schedule(dev);
+ }
+ spin_unlock_bh(&np->rx_lock);
+
+ xennet_maybe_wake_tx(dev);
+
+ return 0;
+}
+
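+/*
+ * Reclaim skbs and grant references for tx requests the backend has
+ * responded to, then wake the queue if space has freed up.
+ */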
+static void xennet_tx_buf_gc(struct net_device *dev)
+{
+ RING_IDX cons, prod;
+ unsigned short id;
+ struct netfront_info *np = netdev_priv(dev);
+ struct sk_buff *skb;
+
+ BUG_ON(!netif_carrier_ok(dev));
+
+ do {
+ prod = np->tx.sring->rsp_prod;
+ rmb(); /* Ensure we see responses up to 'rp'. */
+
+ for (cons = np->tx.rsp_cons; cons != prod; cons++) {
+ struct xen_netif_tx_response *txrsp;
+
+ txrsp = RING_GET_RESPONSE(&np->tx, cons);
+ if (txrsp->status == NETIF_RSP_NULL)
+ continue;
+
+ id = txrsp->id;
+ skb = np->tx_skbs[id].skb;
+ if (unlikely(gnttab_query_foreign_access(
+ np->grant_tx_ref[id]) != 0)) {
+ printk(KERN_ALERT "xennet_tx_buf_gc: warning "
+ "-- grant still in use by backend "
+ "domain.\n");
+ BUG();
+ }
+ gnttab_end_foreign_access_ref(
+ np->grant_tx_ref[id], GNTMAP_readonly);
+ gnttab_release_grant_reference(
+ &np->gref_tx_head, np->grant_tx_ref[id]);
+ np->grant_tx_ref[id] = GRANT_INVALID_REF;
+ add_id_to_freelist(&np->tx_skb_freelist, np->tx_skbs, id);
+ dev_kfree_skb_irq(skb);
+ }
+
+ np->tx.rsp_cons = prod;
+
+ /*
+ * Set a new event, then check for race with update of tx_cons.
+ * Note that it is essential to schedule a callback, no matter
+ * how few buffers are pending. Even if there is space in the
+ * transmit ring, higher layers may be blocked because too much
+ * data is outstanding: in such cases notification from Xen is
+ * likely to be the only kick that we'll get.
+ */
+ np->tx.sring->rsp_event =
+ prod + ((np->tx.sring->req_prod - prod) >> 1) + 1;
+ mb(); /* update shared area */
+ } while ((cons == prod) && (prod != np->tx.sring->rsp_prod));
+
+ xennet_maybe_wake_tx(dev);
+}
+
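+/*
+ * Queue additional tx requests for the part of the header that spills
+ * over a page boundary and for each page fragment, granting the
+ * backend read access to every page involved.
+ */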
+static void xennet_make_frags(struct sk_buff *skb, struct net_device *dev,
+ struct xen_netif_tx_request *tx)
+{
+ struct netfront_info *np = netdev_priv(dev);
+ char *data = skb->data;
+ unsigned long mfn;
+ RING_IDX prod = np->tx.req_prod_pvt;
+ int frags = skb_shinfo(skb)->nr_frags;
+ unsigned int offset = offset_in_page(data);
+ unsigned int len = skb_headlen(skb);
+ unsigned int id;
+ grant_ref_t ref;
+ int i;
+
+ /* While the header overlaps a page boundary (including being
+	   larger than a page), split it into page-sized chunks. */
+ while (len > PAGE_SIZE - offset) {
+ tx->size = PAGE_SIZE - offset;
+ tx->flags |= NETTXF_more_data;
+ len -= tx->size;
+ data += tx->size;
+ offset = 0;
+
+ id = get_id_from_freelist(&np->tx_skb_freelist, np->tx_skbs);
+ np->tx_skbs[id].skb = skb_get(skb);
+ tx = RING_GET_REQUEST(&np->tx, prod++);
+ tx->id = id;
+ ref = gnttab_claim_grant_reference(&np->gref_tx_head);
+ BUG_ON((signed short)ref < 0);
+
+ mfn = virt_to_mfn(data);
+ gnttab_grant_foreign_access_ref(ref, np->xbdev->otherend_id,
+ mfn, GNTMAP_readonly);
+
+ tx->gref = np->grant_tx_ref[id] = ref;
+ tx->offset = offset;
+ tx->size = len;
+ tx->flags = 0;
+ }
+
+ /* Grant backend access to each skb fragment page. */
+ for (i = 0; i < frags; i++) {
+ skb_frag_t *frag = skb_shinfo(skb)->frags + i;
+
+ tx->flags |= NETTXF_more_data;
+
+ id = get_id_from_freelist(&np->tx_skb_freelist, np->tx_skbs);
+ np->tx_skbs[id].skb = skb_get(skb);
+ tx = RING_GET_REQUEST(&np->tx, prod++);
+ tx->id = id;
+ ref = gnttab_claim_grant_reference(&np->gref_tx_head);
+ BUG_ON((signed short)ref < 0);
+
+ mfn = pfn_to_mfn(page_to_pfn(frag->page));
+ gnttab_grant_foreign_access_ref(ref, np->xbdev->otherend_id,
+ mfn, GNTMAP_readonly);
+
+ tx->gref = np->grant_tx_ref[id] = ref;
+ tx->offset = frag->page_offset;
+ tx->size = frag->size;
+ tx->flags = 0;
+ }
+
+ np->tx.req_prod_pvt = prod;
+}
+
+static int xennet_start_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+ unsigned short id;
+ struct netfront_info *np = netdev_priv(dev);
+ struct xen_netif_tx_request *tx;
+ struct xen_netif_extra_info *extra;
+ char *data = skb->data;
+ RING_IDX i;
+ grant_ref_t ref;
+ unsigned long mfn;
+ int notify;
+ int frags = skb_shinfo(skb)->nr_frags;
+ unsigned int offset = offset_in_page(data);
+ unsigned int len = skb_headlen(skb);
+
+ frags += (offset + len + PAGE_SIZE - 1) / PAGE_SIZE;
+ if (unlikely(frags > MAX_SKB_FRAGS + 1)) {
+ printk(KERN_ALERT "xennet: skb rides the rocket: %d frags\n",
+ frags);
+ dump_stack();
+ goto drop;
+ }
+
+ spin_lock_irq(&np->tx_lock);
+
+ if (unlikely(!netif_carrier_ok(dev) ||
+ (frags > 1 && !xennet_can_sg(dev)) ||
+ netif_needs_gso(dev, skb))) {
+ spin_unlock_irq(&np->tx_lock);
+ goto drop;
+ }
+
+ i = np->tx.req_prod_pvt;
+
+ id = get_id_from_freelist(&np->tx_skb_freelist, np->tx_skbs);
+ np->tx_skbs[id].skb = skb;
+
+ tx = RING_GET_REQUEST(&np->tx, i);
+
+ tx->id = id;
+ ref = gnttab_claim_grant_reference(&np->gref_tx_head);
+ BUG_ON((signed short)ref < 0);
+ mfn = virt_to_mfn(data);
+ gnttab_grant_foreign_access_ref(
+ ref, np->xbdev->otherend_id, mfn, GNTMAP_readonly);
+ tx->gref = np->grant_tx_ref[id] = ref;
+ tx->offset = offset;
+ tx->size = len;
+ extra = NULL;
+
+ tx->flags = 0;
+ if (skb->ip_summed == CHECKSUM_PARTIAL)
+ /* local packet? */
+ tx->flags |= NETTXF_csum_blank | NETTXF_data_validated;
+ else if (skb->ip_summed == CHECKSUM_UNNECESSARY)
+ /* remote but checksummed. */
+ tx->flags |= NETTXF_data_validated;
+
+ if (skb_shinfo(skb)->gso_size) {
+ struct xen_netif_extra_info *gso;
+
+ gso = (struct xen_netif_extra_info *)
+ RING_GET_REQUEST(&np->tx, ++i);
+
+ if (extra)
+ extra->flags |= XEN_NETIF_EXTRA_FLAG_MORE;
+ else
+ tx->flags |= NETTXF_extra_info;
+
+ gso->u.gso.size = skb_shinfo(skb)->gso_size;
+ gso->u.gso.type = XEN_NETIF_GSO_TYPE_TCPV4;
+ gso->u.gso.pad = 0;
+ gso->u.gso.features = 0;
+
+ gso->type = XEN_NETIF_EXTRA_TYPE_GSO;
+ gso->flags = 0;
+ extra = gso;
+ }
+
+ np->tx.req_prod_pvt = i + 1;
+
+ xennet_make_frags(skb, dev, tx);
+ tx->size = skb->len;
+
+ RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&np->tx, notify);
+ if (notify)
+ notify_remote_via_irq(np->netdev->irq);
+
+ xennet_tx_buf_gc(dev);
+
+ if (!netfront_tx_slot_available(np))
+ netif_stop_queue(dev);
+
+ spin_unlock_irq(&np->tx_lock);
+
+ np->stats.tx_bytes += skb->len;
+ np->stats.tx_packets++;
+
+ return 0;
+
+ drop:
+ np->stats.tx_dropped++;
+ dev_kfree_skb(skb);
+ return 0;
+}
+
+static int xennet_close(struct net_device *dev)
+{
+ struct netfront_info *np = netdev_priv(dev);
+ netif_stop_queue(np->netdev);
+ return 0;
+}
+
+static struct net_device_stats *xennet_get_stats(struct net_device *dev)
+{
+ struct netfront_info *np = netdev_priv(dev);
+ return &np->stats;
+}
+
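+/* Re-post an unconsumed rx buffer (skb and grant ref) at the next free slot. */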
+static void xennet_move_rx_slot(struct netfront_info *np, struct sk_buff *skb,
+ grant_ref_t ref)
+{
+ int new = xennet_rxidx(np->rx.req_prod_pvt);
+
+ BUG_ON(np->rx_skbs[new]);
+ np->rx_skbs[new] = skb;
+ np->grant_rx_ref[new] = ref;
+ RING_GET_REQUEST(&np->rx, np->rx.req_prod_pvt)->id = new;
+ RING_GET_REQUEST(&np->rx, np->rx.req_prod_pvt)->gref = ref;
+ np->rx.req_prod_pvt++;
+}
+
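+/* Consume the chain of extra-info slots that follows an rx response. */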
+static int xennet_get_extras(struct netfront_info *np,
+ struct xen_netif_extra_info *extras,
+ RING_IDX rp)
+{
+ struct xen_netif_extra_info *extra;
+ struct device *dev = &np->netdev->dev;
+ RING_IDX cons = np->rx.rsp_cons;
+ int err = 0;
+
+ do {
+ struct sk_buff *skb;
+ grant_ref_t ref;
+
+ if (unlikely(cons + 1 == rp)) {
+ if (net_ratelimit())
+ dev_warn(dev, "Missing extra info\n");
+ err = -EBADR;
+ break;
+ }
+
+ extra = (struct xen_netif_extra_info *)
+ RING_GET_RESPONSE(&np->rx, ++cons);
+
+ if (unlikely(!extra->type ||
+ extra->type >= XEN_NETIF_EXTRA_TYPE_MAX)) {
+ if (net_ratelimit())
+ dev_warn(dev, "Invalid extra type: %d\n",
+ extra->type);
+ err = -EINVAL;
+ } else {
+ memcpy(&extras[extra->type - 1], extra,
+ sizeof(*extra));
+ }
+
+ skb = xennet_get_rx_skb(np, cons);
+ ref = xennet_get_rx_ref(np, cons);
+ xennet_move_rx_slot(np, skb, ref);
+ } while (extra->flags & XEN_NETIF_EXTRA_FLAG_MORE);
+
+ np->rx.rsp_cons = cons;
+ return err;
+}
+
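+/*
+ * Walk the chain of rx responses for one packet, reclaiming the grant
+ * reference for each slot and collecting the buffers on @list.
+ */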
+static int xennet_get_responses(struct netfront_info *np,
+ struct netfront_rx_info *rinfo, RING_IDX rp,
+ struct sk_buff_head *list)
+{
+ struct xen_netif_rx_response *rx = &rinfo->rx;
+ struct xen_netif_extra_info *extras = rinfo->extras;
+ struct device *dev = &np->netdev->dev;
+ RING_IDX cons = np->rx.rsp_cons;
+ struct sk_buff *skb = xennet_get_rx_skb(np, cons);
+ grant_ref_t ref = xennet_get_rx_ref(np, cons);
+ int max = MAX_SKB_FRAGS + (rx->status <= RX_COPY_THRESHOLD);
+ int frags = 1;
+ int err = 0;
+ unsigned long ret;
+
+ if (rx->flags & NETRXF_extra_info) {
+ err = xennet_get_extras(np, extras, rp);
+ cons = np->rx.rsp_cons;
+ }
+
+ for (;;) {
+ if (unlikely(rx->status < 0 ||
+ rx->offset + rx->status > PAGE_SIZE)) {
+ if (net_ratelimit())
+ dev_warn(dev, "rx->offset: %x, size: %u\n",
+ rx->offset, rx->status);
+ xennet_move_rx_slot(np, skb, ref);
+ err = -EINVAL;
+ goto next;
+ }
+
+ /*
+ * This definitely indicates a bug, either in this driver or in
+ * the backend driver. In future this should flag the bad
+		 * situation to the system controller to reboot the backend.
+ */
+ if (ref == GRANT_INVALID_REF) {
+ if (net_ratelimit())
+ dev_warn(dev, "Bad rx response id %d.\n",
+ rx->id);
+ err = -EINVAL;
+ goto next;
+ }
+
+ ret = gnttab_end_foreign_access_ref(ref, 0);
+ BUG_ON(!ret);
+
+ gnttab_release_grant_reference(&np->gref_rx_head, ref);
+
+ __skb_queue_tail(list, skb);
+
+next:
+ if (!(rx->flags & NETRXF_more_data))
+ break;
+
+ if (cons + frags == rp) {
+ if (net_ratelimit())
+ dev_warn(dev, "Need more frags\n");
+ err = -ENOENT;
+ break;
+ }
+
+ rx = RING_GET_RESPONSE(&np->rx, cons + frags);
+ skb = xennet_get_rx_skb(np, cons + frags);
+ ref = xennet_get_rx_ref(np, cons + frags);
+ frags++;
+ }
+
+ if (unlikely(frags > max)) {
+ if (net_ratelimit())
+ dev_warn(dev, "Too many frags\n");
+ err = -E2BIG;
+ }
+
+ if (unlikely(err))
+ np->rx.rsp_cons = cons + frags;
+
+ return err;
+}
+
+static int xennet_set_skb_gso(struct sk_buff *skb,
+ struct xen_netif_extra_info *gso)
+{
+ if (!gso->u.gso.size) {
+ if (net_ratelimit())
+ printk(KERN_WARNING "GSO size must not be zero.\n");
+ return -EINVAL;
+ }
+
+	/* Currently only TCPv4 segmentation offload is supported. */
+ if (gso->u.gso.type != XEN_NETIF_GSO_TYPE_TCPV4) {
+ if (net_ratelimit())
+ printk(KERN_WARNING "Bad GSO type %d.\n", gso->u.gso.type);
+ return -EINVAL;
+ }
+
+ skb_shinfo(skb)->gso_size = gso->u.gso.size;
+ skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;
+
+ /* Header must be checked, and gso_segs computed. */
+ skb_shinfo(skb)->gso_type |= SKB_GSO_DODGY;
+ skb_shinfo(skb)->gso_segs = 0;
+
+ return 0;
+}
+
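+/*
+ * Attach the pages of the extra response skbs gathered by
+ * xennet_get_responses() to skb as fragments.
+ */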
+static RING_IDX xennet_fill_frags(struct netfront_info *np,
+ struct sk_buff *skb,
+ struct sk_buff_head *list)
+{
+ struct skb_shared_info *shinfo = skb_shinfo(skb);
+ int nr_frags = shinfo->nr_frags;
+ RING_IDX cons = np->rx.rsp_cons;
+ skb_frag_t *frag = shinfo->frags + nr_frags;
+ struct sk_buff *nskb;
+
+ while ((nskb = __skb_dequeue(list))) {
+ struct xen_netif_rx_response *rx =
+ RING_GET_RESPONSE(&np->rx, ++cons);
+
+ frag->page = skb_shinfo(nskb)->frags[0].page;
+ frag->page_offset = rx->offset;
+ frag->size = rx->status;
+
+ skb->data_len += rx->status;
+
+ skb_shinfo(nskb)->nr_frags = 0;
+ kfree_skb(nskb);
+
+ frag++;
+ nr_frags++;
+ }
+
+ shinfo->nr_frags = nr_frags;
+ return cons;
+}
+
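+/*
+ * Point csum_start/csum_offset at the TCP or UDP checksum field so the
+ * stack can complete the partial checksum later.
+ */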
+static int skb_checksum_setup(struct sk_buff *skb)
+{
+ struct iphdr *iph;
+ unsigned char *th;
+ int err = -EPROTO;
+
+ if (skb->protocol != htons(ETH_P_IP))
+ goto out;
+
+ iph = (void *)skb->data;
+ th = skb->data + 4 * iph->ihl;
+ if (th >= skb_tail_pointer(skb))
+ goto out;
+
+ skb->csum_start = th - skb->head;
+ switch (iph->protocol) {
+ case IPPROTO_TCP:
+ skb->csum_offset = offsetof(struct tcphdr, check);
+ break;
+ case IPPROTO_UDP:
+ skb->csum_offset = offsetof(struct udphdr, check);
+ break;
+ default:
+ if (net_ratelimit())
+ printk(KERN_ERR "Attempting to checksum a non-"
+ "TCP/UDP packet, dropping a protocol"
+ " %d packet", iph->protocol);
+ goto out;
+ }
+
+ if ((th + skb->csum_offset + 2) > skb_tail_pointer(skb))
+ goto out;
+
+ err = 0;
+
+out:
+ return err;
+}
+
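+/*
+ * Copy each packet's header into its linear area, fix up its checksum
+ * state and hand it to the network stack.
+ */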
+static int handle_incoming_queue(struct net_device *dev,
+ struct sk_buff_head *rxq)
+{
+ struct netfront_info *np = netdev_priv(dev);
+ int packets_dropped = 0;
+ struct sk_buff *skb;
+
+ while ((skb = __skb_dequeue(rxq)) != NULL) {
+ struct page *page = NETFRONT_SKB_CB(skb)->page;
+ void *vaddr = page_address(page);
+ unsigned offset = NETFRONT_SKB_CB(skb)->offset;
+
+ memcpy(skb->data, vaddr + offset,
+ skb_headlen(skb));
+
+ if (page != skb_shinfo(skb)->frags[0].page)
+ __free_page(page);
+
+		/* Ethernet work: delayed to here as it peeks at the header. */
+ skb->protocol = eth_type_trans(skb, dev);
+
+ if (skb->ip_summed == CHECKSUM_PARTIAL) {
+ if (skb_checksum_setup(skb)) {
+ kfree_skb(skb);
+ packets_dropped++;
+ np->stats.rx_errors++;
+ continue;
+ }
+ }
+
+ np->stats.rx_packets++;
+ np->stats.rx_bytes += skb->len;
+
+ /* Pass it up. */
+ netif_receive_skb(skb);
+ dev->last_rx = jiffies;
+ }
+
+ return packets_dropped;
+}
+
+static int xennet_poll(struct net_device *dev, int *pbudget)
+{
+ struct netfront_info *np = netdev_priv(dev);
+ struct sk_buff *skb;
+ struct netfront_rx_info rinfo;
+ struct xen_netif_rx_response *rx = &rinfo.rx;
+ struct xen_netif_extra_info *extras = rinfo.extras;
+ RING_IDX i, rp;
+ int work_done, budget, more_to_do = 1;
+ struct sk_buff_head rxq;
+ struct sk_buff_head errq;
+ struct sk_buff_head tmpq;
+ unsigned long flags;
+ unsigned int len;
+ int err;
+
+ spin_lock(&np->rx_lock);
+
+ if (unlikely(!netif_carrier_ok(dev))) {
+ spin_unlock(&np->rx_lock);
+ return 0;
+ }
+
+ skb_queue_head_init(&rxq);
+ skb_queue_head_init(&errq);
+ skb_queue_head_init(&tmpq);
+
+ budget = *pbudget;
+ if (budget > dev->quota)
+ budget = dev->quota;
+ rp = np->rx.sring->rsp_prod;
+ rmb(); /* Ensure we see queued responses up to 'rp'. */
+
+ i = np->rx.rsp_cons;
+ work_done = 0;
+ while ((i != rp) && (work_done < budget)) {
+ memcpy(rx, RING_GET_RESPONSE(&np->rx, i), sizeof(*rx));
+ memset(extras, 0, sizeof(rinfo.extras));
+
+ err = xennet_get_responses(np, &rinfo, rp, &tmpq);
+
+ if (unlikely(err)) {
+err:
+ while ((skb = __skb_dequeue(&tmpq)))
+ __skb_queue_tail(&errq, skb);
+ np->stats.rx_errors++;
+ i = np->rx.rsp_cons;
+ continue;
+ }
+
+ skb = __skb_dequeue(&tmpq);
+
+ if (extras[XEN_NETIF_EXTRA_TYPE_GSO - 1].type) {
+ struct xen_netif_extra_info *gso;
+ gso = &extras[XEN_NETIF_EXTRA_TYPE_GSO - 1];
+
+ if (unlikely(xennet_set_skb_gso(skb, gso))) {
+ __skb_queue_head(&tmpq, skb);
+ np->rx.rsp_cons += skb_queue_len(&tmpq);
+ goto err;
+ }
+ }
+
+ NETFRONT_SKB_CB(skb)->page = skb_shinfo(skb)->frags[0].page;
+ NETFRONT_SKB_CB(skb)->offset = rx->offset;
+
+ len = rx->status;
+ if (len > RX_COPY_THRESHOLD)
+ len = RX_COPY_THRESHOLD;
+ skb_put(skb, len);
+
+ if (rx->status > len) {
+ skb_shinfo(skb)->frags[0].page_offset =
+ rx->offset + len;
+ skb_shinfo(skb)->frags[0].size = rx->status - len;
+ skb->data_len = rx->status - len;
+ } else {
+ skb_shinfo(skb)->frags[0].page = NULL;
+ skb_shinfo(skb)->nr_frags = 0;
+ }
+
+ i = xennet_fill_frags(np, skb, &tmpq);
+
+ /*
+ * Truesize approximates the size of true data plus
+ * any supervisor overheads. Adding hypervisor
+ * overheads has been shown to significantly reduce
+ * achievable bandwidth with the default receive
+ * buffer size. It is therefore not wise to account
+ * for it here.
+ *
+ * After alloc_skb(RX_COPY_THRESHOLD), truesize is set
+ * to RX_COPY_THRESHOLD + the supervisor
+ * overheads. Here, we add the size of the data pulled
+ * in xennet_fill_frags().
+ *
+ * We also adjust for any unused space in the main
+ * data area by subtracting (RX_COPY_THRESHOLD -
+ * len). This is especially important with drivers
+ * which split incoming packets into header and data,
+ * using only 66 bytes of the main data area (see the
+ * e1000 driver for example.) On such systems,
+		 * without this last adjustment, our achievable
+		 * receive throughput using the standard receive
+ * buffer size was cut by 25%(!!!).
+ */
+ skb->truesize += skb->data_len - (RX_COPY_THRESHOLD - len);
+ skb->len += skb->data_len;
+
+ if (rx->flags & NETRXF_csum_blank)
+ skb->ip_summed = CHECKSUM_PARTIAL;
+ else if (rx->flags & NETRXF_data_validated)
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+
+ __skb_queue_tail(&rxq, skb);
+
+ np->rx.rsp_cons = ++i;
+ work_done++;
+ }
+
+ while ((skb = __skb_dequeue(&errq)))
+ kfree_skb(skb);
+
+ work_done -= handle_incoming_queue(dev, &rxq);
+
+ /* If we get a callback with very few responses, reduce fill target. */
+ /* NB. Note exponential increase, linear decrease. */
+ if (((np->rx.req_prod_pvt - np->rx.sring->rsp_prod) >
+ ((3*np->rx_target) / 4)) &&
+ (--np->rx_target < np->rx_min_target))
+ np->rx_target = np->rx_min_target;
+
+ xennet_alloc_rx_buffers(dev);
+
+ *pbudget -= work_done;
+ dev->quota -= work_done;
+
+ if (work_done < budget) {
+ local_irq_save(flags);
+
+ RING_FINAL_CHECK_FOR_RESPONSES(&np->rx, more_to_do);
+ if (!more_to_do)
+ __netif_rx_complete(dev);
+
+ local_irq_restore(flags);
+ }
+
+ spin_unlock(&np->rx_lock);
+
+ return more_to_do;
+}
+
+static int xennet_change_mtu(struct net_device *dev, int mtu)
+{
+ int max = xennet_can_sg(dev) ? 65535 - ETH_HLEN : ETH_DATA_LEN;
+
+ if (mtu > max)
+ return -EINVAL;
+ dev->mtu = mtu;
+ return 0;
+}
+
+static void xennet_release_tx_bufs(struct netfront_info *np)
+{
+ struct sk_buff *skb;
+ int i;
+
+ for (i = 0; i < NET_TX_RING_SIZE; i++) {
+ /* Skip over entries which are actually freelist references */
+ if ((unsigned long)np->tx_skbs[i].skb < PAGE_OFFSET)
+ continue;
+
+ skb = np->tx_skbs[i].skb;
+ gnttab_end_foreign_access_ref(np->grant_tx_ref[i],
+ GNTMAP_readonly);
+ gnttab_release_grant_reference(&np->gref_tx_head,
+ np->grant_tx_ref[i]);
+ np->grant_tx_ref[i] = GRANT_INVALID_REF;
+ add_id_to_freelist(&np->tx_skb_freelist, np->tx_skbs, i);
+ dev_kfree_skb_irq(skb);
+ }
+}
+
+static void xennet_release_rx_bufs(struct netfront_info *np)
+{
+ struct mmu_update *mmu = np->rx_mmu;
+ struct multicall_entry *mcl = np->rx_mcl;
+ struct sk_buff_head free_list;
+ struct sk_buff *skb;
+ unsigned long mfn;
+ int xfer = 0, noxfer = 0, unused = 0;
+ int id, ref;
+
+ dev_warn(&np->netdev->dev, "%s: fix me for copying receiver.\n",
+ __func__);
+ return;
+
+ skb_queue_head_init(&free_list);
+
+ spin_lock_bh(&np->rx_lock);
+
+ for (id = 0; id < NET_RX_RING_SIZE; id++) {
+ ref = np->grant_rx_ref[id];
+ if (ref == GRANT_INVALID_REF) {
+ unused++;
+ continue;
+ }
+
+ skb = np->rx_skbs[id];
+ mfn = gnttab_end_foreign_transfer_ref(ref);
+ gnttab_release_grant_reference(&np->gref_rx_head, ref);
+ np->grant_rx_ref[id] = GRANT_INVALID_REF;
+
+ if (0 == mfn) {
+ skb_shinfo(skb)->nr_frags = 0;
+ dev_kfree_skb(skb);
+ noxfer++;
+ continue;
+ }
+
+ if (!xen_feature(XENFEAT_auto_translated_physmap)) {
+ /* Remap the page. */
+ struct page *page = skb_shinfo(skb)->frags[0].page;
+ unsigned long pfn = page_to_pfn(page);
+ void *vaddr = page_address(page);
+
+ MULTI_update_va_mapping(mcl, (unsigned long)vaddr,
+ mfn_pte(mfn, PAGE_KERNEL),
+ 0);
+ mcl++;
+ mmu->ptr = ((u64)mfn << PAGE_SHIFT)
+ | MMU_MACHPHYS_UPDATE;
+ mmu->val = pfn;
+ mmu++;
+
+ set_phys_to_machine(pfn, mfn);
+ }
+ __skb_queue_tail(&free_list, skb);
+ xfer++;
+ }
+
+ dev_info(&np->netdev->dev, "%s: %d xfer, %d noxfer, %d unused\n",
+ __func__, xfer, noxfer, unused);
+
+ if (xfer) {
+ if (!xen_feature(XENFEAT_auto_translated_physmap)) {
+ /* Do all the remapping work and M2P updates. */
+ MULTI_mmu_update(mcl, np->rx_mmu, mmu - np->rx_mmu,
+ 0, DOMID_SELF);
+ mcl++;
+ HYPERVISOR_multicall(np->rx_mcl, mcl - np->rx_mcl);
+ }
+ }
+
+ while ((skb = __skb_dequeue(&free_list)) != NULL)
+ dev_kfree_skb(skb);
+
+ spin_unlock_bh(&np->rx_lock);
+}
+
+static void xennet_uninit(struct net_device *dev)
+{
+ struct netfront_info *np = netdev_priv(dev);
+ xennet_release_tx_bufs(np);
+ xennet_release_rx_bufs(np);
+ gnttab_free_grant_references(np->gref_tx_head);
+ gnttab_free_grant_references(np->gref_rx_head);
+}
+
+static struct net_device * __devinit xennet_create_dev(struct xenbus_device *dev)
+{
+ int i, err;
+ struct net_device *netdev;
+ struct netfront_info *np;
+
+ netdev = alloc_etherdev(sizeof(struct netfront_info));
+ if (!netdev) {
+ printk(KERN_WARNING "%s> alloc_etherdev failed.\n",
+ __func__);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ np = netdev_priv(netdev);
+ np->xbdev = dev;
+
+ spin_lock_init(&np->tx_lock);
+ spin_lock_init(&np->rx_lock);
+
+ skb_queue_head_init(&np->rx_batch);
+ np->rx_target = RX_DFL_MIN_TARGET;
+ np->rx_min_target = RX_DFL_MIN_TARGET;
+ np->rx_max_target = RX_MAX_TARGET;
+
+ init_timer(&np->rx_refill_timer);
+ np->rx_refill_timer.data = (unsigned long)netdev;
+ np->rx_refill_timer.function = rx_refill_timeout;
+
+ /* Initialise tx_skbs as a free chain containing every entry. */
+ np->tx_skb_freelist = 0;
+ for (i = 0; i < NET_TX_RING_SIZE; i++) {
+ np->tx_skbs[i].link = i+1;
+ np->grant_tx_ref[i] = GRANT_INVALID_REF;
+ }
+
+ /* Clear out rx_skbs */
+ for (i = 0; i < NET_RX_RING_SIZE; i++) {
+ np->rx_skbs[i] = NULL;
+ np->grant_rx_ref[i] = GRANT_INVALID_REF;
+ }
+
+ /* A grant for every tx ring slot */
+ if (gnttab_alloc_grant_references(TX_MAX_TARGET,
+ &np->gref_tx_head) < 0) {
+ printk(KERN_ALERT "#### netfront can't alloc tx grant refs\n");
+ err = -ENOMEM;
+ goto exit;
+ }
+ /* A grant for every rx ring slot */
+ if (gnttab_alloc_grant_references(RX_MAX_TARGET,
+ &np->gref_rx_head) < 0) {
+ printk(KERN_ALERT "#### netfront can't alloc rx grant refs\n");
+ err = -ENOMEM;
+ goto exit_free_tx;
+ }
+
+ netdev->open = xennet_open;
+ netdev->hard_start_xmit = xennet_start_xmit;
+ netdev->stop = xennet_close;
+ netdev->get_stats = xennet_get_stats;
+ netdev->poll = xennet_poll;
+ netdev->uninit = xennet_uninit;
+ netdev->change_mtu = xennet_change_mtu;
+ netdev->weight = 64;
+ netdev->features = NETIF_F_IP_CSUM;
+
+ SET_ETHTOOL_OPS(netdev, &xennet_ethtool_ops);
+ SET_MODULE_OWNER(netdev);
+ SET_NETDEV_DEV(netdev, &dev->dev);
+
+ np->netdev = netdev;
+
+ netif_carrier_off(netdev);
+
+ return netdev;
+
+ exit_free_tx:
+ gnttab_free_grant_references(np->gref_tx_head);
+ exit:
+ free_netdev(netdev);
+ return ERR_PTR(err);
+}
+
+/**
+ * Entry point to this code when a new device is created. Allocate the basic
+ * structures and the ring buffers for communication with the backend, and
+ * inform the backend of the appropriate details for those.
+ */
+static int __devinit netfront_probe(struct xenbus_device *dev,
+ const struct xenbus_device_id *id)
+{
+ int err;
+ struct net_device *netdev;
+ struct netfront_info *info;
+
+ netdev = xennet_create_dev(dev);
+ if (IS_ERR(netdev)) {
+ err = PTR_ERR(netdev);
+ xenbus_dev_fatal(dev, err, "creating netdev");
+ return err;
+ }
+
+ info = netdev_priv(netdev);
+ dev->dev.driver_data = info;
+
+ err = register_netdev(info->netdev);
+ if (err) {
+ printk(KERN_WARNING "%s: register_netdev err=%d\n",
+ __func__, err);
+ goto fail;
+ }
+
+ err = xennet_sysfs_addif(info->netdev);
+ if (err) {
+ unregister_netdev(info->netdev);
+ printk(KERN_WARNING "%s: add sysfs failed err=%d\n",
+ __func__, err);
+ goto fail;
+ }
+
+ return 0;
+
+ fail:
+ free_netdev(netdev);
+ dev->dev.driver_data = NULL;
+ return err;
+}
+
+static void xennet_end_access(int ref, void *page)
+{
+ /* This frees the page as a side-effect */
+ if (ref != GRANT_INVALID_REF)
+ gnttab_end_foreign_access(ref, 0, (unsigned long)page);
+}
+
+static void xennet_disconnect_backend(struct netfront_info *info)
+{
+ /* Stop old i/f to prevent errors whilst we rebuild the state. */
+ spin_lock_bh(&info->rx_lock);
+ spin_lock_irq(&info->tx_lock);
+ netif_carrier_off(info->netdev);
+ spin_unlock_irq(&info->tx_lock);
+ spin_unlock_bh(&info->rx_lock);
+
+ if (info->netdev->irq)
+ unbind_from_irqhandler(info->netdev->irq, info->netdev);
+ info->evtchn = info->netdev->irq = 0;
+
+ /* End access and free the pages */
+ xennet_end_access(info->tx_ring_ref, info->tx.sring);
+ xennet_end_access(info->rx_ring_ref, info->rx.sring);
+
+ info->tx_ring_ref = GRANT_INVALID_REF;
+ info->rx_ring_ref = GRANT_INVALID_REF;
+ info->tx.sring = NULL;
+ info->rx.sring = NULL;
+}
+
+/**
+ * We are reconnecting to the backend, due to a suspend/resume, or a backend
+ * driver restart. We tear down our netif structure and recreate it, but
+ * leave the device-layer structures intact so that this is transparent to the
+ * rest of the kernel.
+ */
+static int netfront_resume(struct xenbus_device *dev)
+{
+ struct netfront_info *info = dev->dev.driver_data;
+
+ dev_dbg(&dev->dev, "%s\n", dev->nodename);
+
+ xennet_disconnect_backend(info);
+ return 0;
+}
+
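+/*
+ * Parse the colon-separated MAC address published for this device in
+ * xenstore.
+ */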
+static int xen_net_read_mac(struct xenbus_device *dev, u8 mac[])
+{
+ char *s, *e, *macstr;
+ int i;
+
+ macstr = s = xenbus_read(XBT_NIL, dev->nodename, "mac", NULL);
+ if (IS_ERR(macstr))
+ return PTR_ERR(macstr);
+
+ for (i = 0; i < ETH_ALEN; i++) {
+ mac[i] = simple_strtoul(s, &e, 16);
+ if ((s == e) || (*e != ((i == ETH_ALEN-1) ? '\0' : ':'))) {
+ kfree(macstr);
+ return -ENOENT;
+ }
+ s = e+1;
+ }
+
+ kfree(macstr);
+ return 0;
+}
+
+static irqreturn_t xennet_interrupt(int irq, void *dev_id)
+{
+ struct net_device *dev = dev_id;
+ struct netfront_info *np = netdev_priv(dev);
+ unsigned long flags;
+
+ spin_lock_irqsave(&np->tx_lock, flags);
+
+ if (likely(netif_carrier_ok(dev))) {
+ xennet_tx_buf_gc(dev);
+ /* Under tx_lock: protects access to rx shared-ring indexes. */
+ if (RING_HAS_UNCONSUMED_RESPONSES(&np->rx))
+ netif_rx_schedule(dev);
+ }
+
+ spin_unlock_irqrestore(&np->tx_lock, flags);
+
+ return IRQ_HANDLED;
+}
+
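+/*
+ * Allocate the shared tx and rx rings, grant the backend access to
+ * them and bind an event channel to our interrupt handler.
+ */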
+static int setup_netfront(struct xenbus_device *dev, struct netfront_info *info)
+{
+ struct xen_netif_tx_sring *txs;
+ struct xen_netif_rx_sring *rxs;
+ int err;
+ struct net_device *netdev = info->netdev;
+
+ info->tx_ring_ref = GRANT_INVALID_REF;
+ info->rx_ring_ref = GRANT_INVALID_REF;
+ info->rx.sring = NULL;
+ info->tx.sring = NULL;
+ netdev->irq = 0;
+
+ err = xen_net_read_mac(dev, netdev->dev_addr);
+ if (err) {
+ xenbus_dev_fatal(dev, err, "parsing %s/mac", dev->nodename);
+ goto fail;
+ }
+
+ txs = (struct xen_netif_tx_sring *)get_zeroed_page(GFP_KERNEL);
+ if (!txs) {
+ err = -ENOMEM;
+ xenbus_dev_fatal(dev, err, "allocating tx ring page");
+ goto fail;
+ }
+ SHARED_RING_INIT(txs);
+ FRONT_RING_INIT(&info->tx, txs, PAGE_SIZE);
+
+ err = xenbus_grant_ring(dev, virt_to_mfn(txs));
+ if (err < 0) {
+ free_page((unsigned long)txs);
+ goto fail;
+ }
+
+ info->tx_ring_ref = err;
+ rxs = (struct xen_netif_rx_sring *)get_zeroed_page(GFP_KERNEL);
+ if (!rxs) {
+ err = -ENOMEM;
+ xenbus_dev_fatal(dev, err, "allocating rx ring page");
+ goto fail;
+ }
+ SHARED_RING_INIT(rxs);
+ FRONT_RING_INIT(&info->rx, rxs, PAGE_SIZE);
+
+ err = xenbus_grant_ring(dev, virt_to_mfn(rxs));
+ if (err < 0) {
+ free_page((unsigned long)rxs);
+ goto fail;
+ }
+ info->rx_ring_ref = err;
+
+ err = xenbus_alloc_evtchn(dev, &info->evtchn);
+ if (err)
+ goto fail;
+
+ err = bind_evtchn_to_irqhandler(info->evtchn, xennet_interrupt,
+ IRQF_SAMPLE_RANDOM, netdev->name,
+ netdev);
+ if (err < 0)
+ goto fail;
+ netdev->irq = err;
+ return 0;
+
+ fail:
+ return err;
+}
+
+/* Common code used when first setting up, and when resuming. */
+static int talk_to_backend(struct xenbus_device *dev,
+ struct netfront_info *info)
+{
+ const char *message;
+ struct xenbus_transaction xbt;
+ int err;
+
+ /* Create shared ring, alloc event channel. */
+ err = setup_netfront(dev, info);
+ if (err)
+ goto out;
+
+again:
+ err = xenbus_transaction_start(&xbt);
+ if (err) {
+ xenbus_dev_fatal(dev, err, "starting transaction");
+ goto destroy_ring;
+ }
+
+ err = xenbus_printf(xbt, dev->nodename, "tx-ring-ref", "%u",
+ info->tx_ring_ref);
+ if (err) {
+ message = "writing tx ring-ref";
+ goto abort_transaction;
+ }
+ err = xenbus_printf(xbt, dev->nodename, "rx-ring-ref", "%u",
+ info->rx_ring_ref);
+ if (err) {
+ message = "writing rx ring-ref";
+ goto abort_transaction;
+ }
+ err = xenbus_printf(xbt, dev->nodename,
+ "event-channel", "%u", info->evtchn);
+ if (err) {
+ message = "writing event-channel";
+ goto abort_transaction;
+ }
+
+ err = xenbus_printf(xbt, dev->nodename, "request-rx-copy", "%u",
+ 1);
+ if (err) {
+ message = "writing request-rx-copy";
+ goto abort_transaction;
+ }
+
+ err = xenbus_printf(xbt, dev->nodename, "feature-rx-notify", "%d", 1);
+ if (err) {
+ message = "writing feature-rx-notify";
+ goto abort_transaction;
+ }
+
+ err = xenbus_printf(xbt, dev->nodename, "feature-sg", "%d", 1);
+ if (err) {
+ message = "writing feature-sg";
+ goto abort_transaction;
+ }
+
+ err = xenbus_printf(xbt, dev->nodename, "feature-gso-tcpv4", "%d", 1);
+ if (err) {
+ message = "writing feature-gso-tcpv4";
+ goto abort_transaction;
+ }
+
+ err = xenbus_transaction_end(xbt, 0);
+ if (err) {
+ if (err == -EAGAIN)
+ goto again;
+ xenbus_dev_fatal(dev, err, "completing transaction");
+ goto destroy_ring;
+ }
+
+ return 0;
+
+ abort_transaction:
+ xenbus_transaction_end(xbt, 1);
+ xenbus_dev_fatal(dev, err, "%s", message);
+ destroy_ring:
+ xennet_disconnect_backend(info);
+ out:
+ return err;
+}
+
+static int xennet_set_sg(struct net_device *dev, u32 data)
+{
+ if (data) {
+ struct netfront_info *np = netdev_priv(dev);
+ int val;
+
+ if (xenbus_scanf(XBT_NIL, np->xbdev->otherend, "feature-sg",
+ "%d", &val) < 0)
+ val = 0;
+ if (!val)
+ return -ENOSYS;
+ } else if (dev->mtu > ETH_DATA_LEN)
+ dev->mtu = ETH_DATA_LEN;
+
+ return ethtool_op_set_sg(dev, data);
+}
+
+static int xennet_set_tso(struct net_device *dev, u32 data)
+{
+ if (data) {
+ struct netfront_info *np = netdev_priv(dev);
+ int val;
+
+ if (xenbus_scanf(XBT_NIL, np->xbdev->otherend,
+ "feature-gso-tcpv4", "%d", &val) < 0)
+ val = 0;
+ if (!val)
+ return -ENOSYS;
+ }
+
+ return ethtool_op_set_tso(dev, data);
+}
+
+static void xennet_set_features(struct net_device *dev)
+{
+ /* Turn off all GSO bits except ROBUST. */
+ dev->features &= (1 << NETIF_F_GSO_SHIFT) - 1;
+ dev->features |= NETIF_F_GSO_ROBUST;
+ xennet_set_sg(dev, 0);
+
+ /* We need checksum offload to enable scatter/gather and TSO. */
+ if (!(dev->features & NETIF_F_IP_CSUM))
+ return;
+
+ if (!xennet_set_sg(dev, 1))
+ xennet_set_tso(dev, 1);
+}
+
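+/*
+ * Bring the interface up: check that the backend supports the copying
+ * receive path, set up the rings, then requeue any rx buffers left
+ * over from a previous connection.
+ */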
+static int xennet_connect(struct net_device *dev)
+{
+ struct netfront_info *np = netdev_priv(dev);
+ int i, requeue_idx, err;
+ struct sk_buff *skb;
+ grant_ref_t ref;
+ struct xen_netif_rx_request *req;
+ unsigned int feature_rx_copy;
+
+ err = xenbus_scanf(XBT_NIL, np->xbdev->otherend,
+ "feature-rx-copy", "%u", &feature_rx_copy);
+ if (err != 1)
+ feature_rx_copy = 0;
+
+ if (!feature_rx_copy) {
+ dev_info(&dev->dev,
+ "backend does not support copying recieve path");
+ return -ENODEV;
+ }
+
+ err = talk_to_backend(np->xbdev, np);
+ if (err)
+ return err;
+
+ xennet_set_features(dev);
+
+ spin_lock_bh(&np->rx_lock);
+ spin_lock_irq(&np->tx_lock);
+
+ /* Step 1: Discard all pending TX packet fragments. */
+ xennet_release_tx_bufs(np);
+
+ /* Step 2: Rebuild the RX buffer freelist and the RX ring itself. */
+ for (requeue_idx = 0, i = 0; i < NET_RX_RING_SIZE; i++) {
+ if (!np->rx_skbs[i])
+ continue;
+
+ skb = np->rx_skbs[requeue_idx] = xennet_get_rx_skb(np, i);
+ ref = np->grant_rx_ref[requeue_idx] = xennet_get_rx_ref(np, i);
+ req = RING_GET_REQUEST(&np->rx, requeue_idx);
+
+		gnttab_grant_foreign_access_ref(
+			ref, np->xbdev->otherend_id,
+			pfn_to_mfn(page_to_pfn(skb_shinfo(skb)->frags->page)),
+			0);
+ req->gref = ref;
+ req->id = requeue_idx;
+
+ requeue_idx++;
+ }
+
+ np->rx.req_prod_pvt = requeue_idx;
+
+ /*
+ * Step 3: All public and private state should now be sane. Get
+ * ready to start sending and receiving packets and give the driver
+ * domain a kick because we've probably just requeued some
+ * packets.
+ */
+ netif_carrier_on(np->netdev);
+ notify_remote_via_irq(np->netdev->irq);
+ xennet_tx_buf_gc(dev);
+ xennet_alloc_rx_buffers(dev);
+
+ spin_unlock_irq(&np->tx_lock);
+ spin_unlock_bh(&np->rx_lock);
+
+ return 0;
+}
+
+/**
+ * backend_changed - callback run when the backend's xenbus state changes
+ * @dev: the xenbus device
+ * @backend_state: the new state of the backend
+ */
+static void backend_changed(struct xenbus_device *dev,
+ enum xenbus_state backend_state)
+{
+ struct netfront_info *np = dev->dev.driver_data;
+ struct net_device *netdev = np->netdev;
+
+ dev_dbg(&dev->dev, "%s\n", xenbus_strstate(backend_state));
+
+ switch (backend_state) {
+ case XenbusStateInitialising:
+ case XenbusStateInitialised:
+ case XenbusStateConnected:
+ case XenbusStateUnknown:
+ case XenbusStateClosed:
+ break;
+
+ case XenbusStateInitWait:
+ if (dev->state != XenbusStateInitialising)
+ break;
+ if (xennet_connect(netdev) != 0)
+ break;
+ xenbus_switch_state(dev, XenbusStateConnected);
+ break;
+
+ case XenbusStateClosing:
+ xenbus_frontend_closed(dev);
+ break;
+ }
+}
+
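+/*
+ * SG and TSO toggles are gated on backend support by the xennet_set_*
+ * wrappers above; the remaining ops use the generic ethtool helpers.
+ */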
+static struct ethtool_ops xennet_ethtool_ops = {
+ .get_tx_csum = ethtool_op_get_tx_csum,
+ .set_tx_csum = ethtool_op_set_tx_csum,
+ .get_sg = ethtool_op_get_sg,
+ .set_sg = xennet_set_sg,
+ .get_tso = ethtool_op_get_tso,
+ .set_tso = xennet_set_tso,
+ .get_link = ethtool_op_get_link,
+};
+
+#ifdef CONFIG_SYSFS
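+/*
+ * The rxbuf_min, rxbuf_max and rxbuf_cur sysfs attributes expose the RX
+ * buffer fill targets.  Writes are clamped to [RX_MIN_TARGET,
+ * RX_MAX_TARGET] and take effect immediately via
+ * xennet_alloc_rx_buffers().
+ */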
+static ssize_t show_rxbuf_min(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct net_device *netdev = to_net_dev(dev);
+ struct netfront_info *info = netdev_priv(netdev);
+
+ return sprintf(buf, "%u\n", info->rx_min_target);
+}
+
+static ssize_t store_rxbuf_min(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t len)
+{
+ struct net_device *netdev = to_net_dev(dev);
+ struct netfront_info *np = netdev_priv(netdev);
+ char *endp;
+ unsigned long target;
+
+ if (!capable(CAP_NET_ADMIN))
+ return -EPERM;
+
+ target = simple_strtoul(buf, &endp, 0);
+ if (endp == buf)
+ return -EBADMSG;
+
+ if (target < RX_MIN_TARGET)
+ target = RX_MIN_TARGET;
+ if (target > RX_MAX_TARGET)
+ target = RX_MAX_TARGET;
+
+ spin_lock_bh(&np->rx_lock);
+ if (target > np->rx_max_target)
+ np->rx_max_target = target;
+ np->rx_min_target = target;
+ if (target > np->rx_target)
+ np->rx_target = target;
+
+ xennet_alloc_rx_buffers(netdev);
+
+ spin_unlock_bh(&np->rx_lock);
+ return len;
+}
+
+static ssize_t show_rxbuf_max(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct net_device *netdev = to_net_dev(dev);
+ struct netfront_info *info = netdev_priv(netdev);
+
+ return sprintf(buf, "%u\n", info->rx_max_target);
+}
+
+static ssize_t store_rxbuf_max(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t len)
+{
+ struct net_device *netdev = to_net_dev(dev);
+ struct netfront_info *np = netdev_priv(netdev);
+ char *endp;
+ unsigned long target;
+
+ if (!capable(CAP_NET_ADMIN))
+ return -EPERM;
+
+ target = simple_strtoul(buf, &endp, 0);
+ if (endp == buf)
+ return -EBADMSG;
+
+ if (target < RX_MIN_TARGET)
+ target = RX_MIN_TARGET;
+ if (target > RX_MAX_TARGET)
+ target = RX_MAX_TARGET;
+
+ spin_lock_bh(&np->rx_lock);
+ if (target < np->rx_min_target)
+ np->rx_min_target = target;
+ np->rx_max_target = target;
+ if (target < np->rx_target)
+ np->rx_target = target;
+
+ xennet_alloc_rx_buffers(netdev);
+
+ spin_unlock_bh(&np->rx_lock);
+ return len;
+}
+
+static ssize_t show_rxbuf_cur(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct net_device *netdev = to_net_dev(dev);
+ struct netfront_info *info = netdev_priv(netdev);
+
+ return sprintf(buf, "%u\n", info->rx_target);
+}
+
+static struct device_attribute xennet_attrs[] = {
+ __ATTR(rxbuf_min, S_IRUGO|S_IWUSR, show_rxbuf_min, store_rxbuf_min),
+ __ATTR(rxbuf_max, S_IRUGO|S_IWUSR, show_rxbuf_max, store_rxbuf_max),
+ __ATTR(rxbuf_cur, S_IRUGO, show_rxbuf_cur, NULL),
+};
+
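+/*
+ * Create the rxbuf_* attributes, removing any already-created ones if a
+ * later one fails.
+ */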
+static int xennet_sysfs_addif(struct net_device *netdev)
+{
+ int i;
+ int err;
+
+ for (i = 0; i < ARRAY_SIZE(xennet_attrs); i++) {
+ err = device_create_file(&netdev->dev,
+ &xennet_attrs[i]);
+ if (err)
+ goto fail;
+ }
+ return 0;
+
+ fail:
+ while (--i >= 0)
+ device_remove_file(&netdev->dev, &xennet_attrs[i]);
+ return err;
+}
+
+static void xennet_sysfs_delif(struct net_device *netdev)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(xennet_attrs); i++)
+ device_remove_file(&netdev->dev, &xennet_attrs[i]);
+}
+
+#endif /* CONFIG_SYSFS */
+
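+/* Match the "vif" devices that the Xen toolstack creates in xenstore. */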
+static struct xenbus_device_id netfront_ids[] = {
+ { "vif" },
+ { "" }
+};
+
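+/* Tear the device down: detach from the backend and free the netdev. */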
+static int __devexit xennet_remove(struct xenbus_device *dev)
+{
+ struct netfront_info *info = dev->dev.driver_data;
+
+ dev_dbg(&dev->dev, "%s\n", dev->nodename);
+
+ unregister_netdev(info->netdev);
+
+ xennet_disconnect_backend(info);
+
+ del_timer_sync(&info->rx_refill_timer);
+
+ xennet_sysfs_delif(info->netdev);
+
+ free_netdev(info->netdev);
+
+ return 0;
+}
+
+static struct xenbus_driver netfront = {
+ .name = "vif",
+ .owner = THIS_MODULE,
+ .ids = netfront_ids,
+ .probe = netfront_probe,
+ .remove = __devexit_p(xennet_remove),
+ .resume = netfront_resume,
+ .otherend_changed = backend_changed,
+};
+
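+/*
+ * Module init: the frontend is only useful in a Xen guest; dom0 hosts
+ * the backend side, so registration is skipped there.
+ */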
+static int __init netif_init(void)
+{
+ if (!is_running_on_xen())
+ return -ENODEV;
+
+ if (is_initial_xendomain())
+ return 0;
+
+ printk(KERN_INFO "Initialising Xen virtual ethernet driver.\n");
+
+ return xenbus_register_frontend(&netfront);
+}
+module_init(netif_init);
+
+static void __exit netif_exit(void)
+{
+ if (is_initial_xendomain())
+ return;
+
+	xenbus_unregister_driver(&netfront);
+}
+module_exit(netif_exit);
+
+MODULE_DESCRIPTION("Xen virtual network device frontend");
+MODULE_LICENSE("GPL");