| author | Linus Torvalds <torvalds@linux-foundation.org> | 2009-10-15 15:03:17 -0700 |
|---|---|---|
| committer | Linus Torvalds <torvalds@linux-foundation.org> | 2009-10-15 15:03:17 -0700 |
| commit | c3da31485f074a6f598b67045b08e2e15d908310 (patch) | |
| tree | 64f9ad3d3752e80de2b22b47cbea8f8512dc5d59 | |
| parent | bd0704111e625ebe75418531550cf471215c3267 (diff) | |
| parent | 8f7e524ce33ca81b663711404709396165da3cbd (diff) | |
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-2.6
* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-2.6: (53 commits)
vmxnet: fix 2 build problems
net: add support for STMicroelectronics Ethernet controllers.
net: ks8851_mll uses mii interfaces
net/fec_mpc52xx: Fix kernel panic on FEC error
net: Fix OF platform drivers coldplug/hotplug when compiled as modules
TI DaVinci EMAC: Clear statistics register properly.
r8169: partial support and phy init for the 8168d
irda/sa1100_ir: check return value of startup hook
udp: Fix udp_poll() and ioctl()
WAN: fix Cisco HDLC handshaking.
tcp: fix tcp_defer_accept to consider the timeout
3c574_cs: spin_lock the set_multicast_list function
net: Teach pegasus driver to ignore bluetooth adapters with clashing Vendor:Product IDs
netxen: fix pci bar mapping
ethoc: fix warning from 32bit build
libertas: fix build
net: VMware virtual Ethernet NIC driver: vmxnet3
net: Fix IXP 2000 network driver building.
libertas: fix build
mac80211: document ieee80211_rx() context requirement
...
80 files changed, 10844 insertions, 345 deletions
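
The "net: Fix OF platform drivers coldplug/hotplug when compiled as modules" entry above lands as the same one-line change repeated across many drivers in the diff below (sja1000_of_platform, fec_mpc52xx_phy, fs_enet, fsl_pq_mdio, gianfar, ibm_newemac, mdio-gpio): each OF match table gains a MODULE_DEVICE_TABLE(of, ...) export, so depmod generates "of:" module aliases and udev can autoload the driver when a matching device-tree node is probed. A minimal sketch of the pattern, with a hypothetical driver name and compatible string (the of_platform_driver fields follow the 2.6.32-era API; .match_table is assumed from that API, not shown in this excerpt):

```c
#include <linux/module.h>
#include <linux/of_platform.h>

/* Hypothetical match table; the compatible string is illustrative. */
static struct of_device_id example_of_match[] = {
	{ .compatible = "vendor,example-nic", },
	{},
};
/* The actual fix: export the table so module aliases are generated
 * and the driver is loaded automatically on coldplug/hotplug. */
MODULE_DEVICE_TABLE(of, example_of_match);

static struct of_platform_driver example_of_driver = {
	.owner = THIS_MODULE,
	.name = "example-nic",
	.match_table = example_of_match,
	/* .probe and .remove omitted from this sketch */
};
```

Without the MODULE_DEVICE_TABLE() line the driver still binds when built in, but as a module it is never loaded automatically — the symptom the shortlog entry describes.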
diff --git a/MAINTAINERS b/MAINTAINERS index d5eb8c13ef0..88241154f4c 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -3667,6 +3667,7 @@ NETWORKING [GENERAL] M: "David S. Miller" <davem@davemloft.net> L: netdev@vger.kernel.org W: http://www.linuxfoundation.org/en/Net +W: http://patchwork.ozlabs.org/project/netdev/list/ T: git git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-2.6.git S: Maintained F: net/ @@ -5664,6 +5665,13 @@ S: Maintained F: drivers/vlynq/vlynq.c F: include/linux/vlynq.h +VMWARE VMXNET3 ETHERNET DRIVER +M: Shreyas Bhatewara <sbhatewara@vmware.com> +M: VMware, Inc. <pv-drivers@vmware.com> +L: netdev@vger.kernel.org +S: Maintained +F: drivers/net/vmxnet3/ + VOLTAGE AND CURRENT REGULATOR FRAMEWORK M: Liam Girdwood <lrg@slimlogic.co.uk> M: Mark Brown <broonie@opensource.wolfsonmicro.com> diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig index 712776089b4..e19ca4bb751 100644 --- a/drivers/net/Kconfig +++ b/drivers/net/Kconfig @@ -1741,6 +1741,7 @@ config KS8851 config KS8851_MLL tristate "Micrel KS8851 MLL" depends on HAS_IOMEM + select MII help This platform driver is for Micrel KS8851 Address/data bus multiplexed network chip. @@ -2482,6 +2483,8 @@ config S6GMAC To compile this driver as a module, choose M here. The module will be called s6gmac. +source "drivers/net/stmmac/Kconfig" + endif # NETDEV_1000 # @@ -3230,4 +3233,12 @@ config VIRTIO_NET This is the virtual network driver for virtio. It can be used with lguest or QEMU based VMMs (like KVM or Xen). Say Y or M. +config VMXNET3 + tristate "VMware VMXNET3 ethernet driver" + depends on PCI && X86 && INET + help + This driver supports VMware's vmxnet3 virtual ethernet NIC. + To compile this driver as a module, choose M here: the + module will be called vmxnet3. + endif # NETDEVICES diff --git a/drivers/net/Makefile b/drivers/net/Makefile index d866b8cf65d..246323d7f16 100644 --- a/drivers/net/Makefile +++ b/drivers/net/Makefile @@ -2,6 +2,10 @@ # Makefile for the Linux network (ethercard) device drivers. 
# +obj-$(CONFIG_MII) += mii.o +obj-$(CONFIG_MDIO) += mdio.o +obj-$(CONFIG_PHYLIB) += phy/ + obj-$(CONFIG_TI_DAVINCI_EMAC) += davinci_emac.o obj-$(CONFIG_E1000) += e1000/ @@ -26,6 +30,7 @@ obj-$(CONFIG_TEHUTI) += tehuti.o obj-$(CONFIG_ENIC) += enic/ obj-$(CONFIG_JME) += jme.o obj-$(CONFIG_BE2NET) += benet/ +obj-$(CONFIG_VMXNET3) += vmxnet3/ gianfar_driver-objs := gianfar.o \ gianfar_ethtool.o \ @@ -95,15 +100,12 @@ obj-$(CONFIG_VIA_VELOCITY) += via-velocity.o obj-$(CONFIG_ADAPTEC_STARFIRE) += starfire.o obj-$(CONFIG_RIONET) += rionet.o obj-$(CONFIG_SH_ETH) += sh_eth.o +obj-$(CONFIG_STMMAC_ETH) += stmmac/ # # end link order section # -obj-$(CONFIG_MII) += mii.o -obj-$(CONFIG_MDIO) += mdio.o -obj-$(CONFIG_PHYLIB) += phy/ - obj-$(CONFIG_SUNDANCE) += sundance.o obj-$(CONFIG_HAMACHI) += hamachi.o obj-$(CONFIG_NET) += Space.o loopback.o diff --git a/drivers/net/acenic.c b/drivers/net/acenic.c index 5f0b05c2d71..d82a9a99475 100644 --- a/drivers/net/acenic.c +++ b/drivers/net/acenic.c @@ -1209,7 +1209,8 @@ static int __devinit ace_init(struct net_device *dev) memset(ap->info, 0, sizeof(struct ace_info)); memset(ap->skb, 0, sizeof(struct ace_skb)); - if (ace_load_firmware(dev)) + ecode = ace_load_firmware(dev); + if (ecode) goto init_error; ap->fw_running = 0; diff --git a/drivers/net/can/sja1000/sja1000_of_platform.c b/drivers/net/can/sja1000/sja1000_of_platform.c index 3373560405b..9dd076a626a 100644 --- a/drivers/net/can/sja1000/sja1000_of_platform.c +++ b/drivers/net/can/sja1000/sja1000_of_platform.c @@ -213,6 +213,7 @@ static struct of_device_id __devinitdata sja1000_ofp_table[] = { {.compatible = "nxp,sja1000"}, {}, }; +MODULE_DEVICE_TABLE(of, sja1000_ofp_table); static struct of_platform_driver sja1000_ofp_driver = { .owner = THIS_MODULE, diff --git a/drivers/net/davinci_emac.c b/drivers/net/davinci_emac.c index 65a2d0ba64e..f72c56dec33 100644 --- a/drivers/net/davinci_emac.c +++ b/drivers/net/davinci_emac.c @@ -333,6 +333,9 @@ static const char emac_version_string[] = "TI DaVinci EMAC Linux v6.1"; #define EMAC_DM646X_MAC_EOI_C0_RXEN (0x01) #define EMAC_DM646X_MAC_EOI_C0_TXEN (0x02) +/* EMAC Stats Clear Mask */ +#define EMAC_STATS_CLR_MASK (0xFFFFFFFF) + /** net_buf_obj: EMAC network bufferdata structure * * EMAC network buffer data structure @@ -2548,40 +2551,49 @@ static int emac_dev_stop(struct net_device *ndev) static struct net_device_stats *emac_dev_getnetstats(struct net_device *ndev) { struct emac_priv *priv = netdev_priv(ndev); + u32 mac_control; + u32 stats_clear_mask; /* update emac hardware stats and reset the registers*/ + mac_control = emac_read(EMAC_MACCONTROL); + + if (mac_control & EMAC_MACCONTROL_GMIIEN) + stats_clear_mask = EMAC_STATS_CLR_MASK; + else + stats_clear_mask = 0; + priv->net_dev_stats.multicast += emac_read(EMAC_RXMCASTFRAMES); - emac_write(EMAC_RXMCASTFRAMES, EMAC_ALL_MULTI_REG_VALUE); + emac_write(EMAC_RXMCASTFRAMES, stats_clear_mask); priv->net_dev_stats.collisions += (emac_read(EMAC_TXCOLLISION) + emac_read(EMAC_TXSINGLECOLL) + emac_read(EMAC_TXMULTICOLL)); - emac_write(EMAC_TXCOLLISION, EMAC_ALL_MULTI_REG_VALUE); - emac_write(EMAC_TXSINGLECOLL, EMAC_ALL_MULTI_REG_VALUE); - emac_write(EMAC_TXMULTICOLL, EMAC_ALL_MULTI_REG_VALUE); + emac_write(EMAC_TXCOLLISION, stats_clear_mask); + emac_write(EMAC_TXSINGLECOLL, stats_clear_mask); + emac_write(EMAC_TXMULTICOLL, stats_clear_mask); priv->net_dev_stats.rx_length_errors += (emac_read(EMAC_RXOVERSIZED) + emac_read(EMAC_RXJABBER) + emac_read(EMAC_RXUNDERSIZED)); - emac_write(EMAC_RXOVERSIZED, 
EMAC_ALL_MULTI_REG_VALUE); - emac_write(EMAC_RXJABBER, EMAC_ALL_MULTI_REG_VALUE); - emac_write(EMAC_RXUNDERSIZED, EMAC_ALL_MULTI_REG_VALUE); + emac_write(EMAC_RXOVERSIZED, stats_clear_mask); + emac_write(EMAC_RXJABBER, stats_clear_mask); + emac_write(EMAC_RXUNDERSIZED, stats_clear_mask); priv->net_dev_stats.rx_over_errors += (emac_read(EMAC_RXSOFOVERRUNS) + emac_read(EMAC_RXMOFOVERRUNS)); - emac_write(EMAC_RXSOFOVERRUNS, EMAC_ALL_MULTI_REG_VALUE); - emac_write(EMAC_RXMOFOVERRUNS, EMAC_ALL_MULTI_REG_VALUE); + emac_write(EMAC_RXSOFOVERRUNS, stats_clear_mask); + emac_write(EMAC_RXMOFOVERRUNS, stats_clear_mask); priv->net_dev_stats.rx_fifo_errors += emac_read(EMAC_RXDMAOVERRUNS); - emac_write(EMAC_RXDMAOVERRUNS, EMAC_ALL_MULTI_REG_VALUE); + emac_write(EMAC_RXDMAOVERRUNS, stats_clear_mask); priv->net_dev_stats.tx_carrier_errors += emac_read(EMAC_TXCARRIERSENSE); - emac_write(EMAC_TXCARRIERSENSE, EMAC_ALL_MULTI_REG_VALUE); + emac_write(EMAC_TXCARRIERSENSE, stats_clear_mask); priv->net_dev_stats.tx_fifo_errors = emac_read(EMAC_TXUNDERRUN); - emac_write(EMAC_TXUNDERRUN, EMAC_ALL_MULTI_REG_VALUE); + emac_write(EMAC_TXUNDERRUN, stats_clear_mask); return &priv->net_dev_stats; } diff --git a/drivers/net/ethoc.c b/drivers/net/ethoc.c index 96f5b2a2d2c..9c950bb5e90 100644 --- a/drivers/net/ethoc.c +++ b/drivers/net/ethoc.c @@ -664,7 +664,8 @@ static int ethoc_open(struct net_device *dev) return ret; /* calculate the number of TX/RX buffers, maximum 128 supported */ - num_bd = min(128, (dev->mem_end - dev->mem_start + 1) / ETHOC_BUFSIZ); + num_bd = min_t(unsigned int, + 128, (dev->mem_end - dev->mem_start + 1) / ETHOC_BUFSIZ); priv->num_tx = max(min_tx, num_bd / 4); priv->num_rx = num_bd - priv->num_tx; ethoc_write(priv, TX_BD_NUM, priv->num_tx); diff --git a/drivers/net/fec_mpc52xx.c b/drivers/net/fec_mpc52xx.c index c40113f5896..66dace6d324 100644 --- a/drivers/net/fec_mpc52xx.c +++ b/drivers/net/fec_mpc52xx.c @@ -759,12 +759,6 @@ static void mpc52xx_fec_reset(struct net_device *dev) mpc52xx_fec_hw_init(dev); - if (priv->phydev) { - phy_stop(priv->phydev); - phy_write(priv->phydev, MII_BMCR, BMCR_RESET); - phy_start(priv->phydev); - } - bcom_fec_rx_reset(priv->rx_dmatsk); bcom_fec_tx_reset(priv->tx_dmatsk); diff --git a/drivers/net/fec_mpc52xx_phy.c b/drivers/net/fec_mpc52xx_phy.c index 31e6d62b785..ee0f3c6d3f8 100644 --- a/drivers/net/fec_mpc52xx_phy.c +++ b/drivers/net/fec_mpc52xx_phy.c @@ -155,6 +155,7 @@ static struct of_device_id mpc52xx_fec_mdio_match[] = { { .compatible = "mpc5200b-fec-phy", }, {} }; +MODULE_DEVICE_TABLE(of, mpc52xx_fec_mdio_match); struct of_platform_driver mpc52xx_fec_mdio_driver = { .name = "mpc5200b-fec-phy", diff --git a/drivers/net/fs_enet/fs_enet-main.c b/drivers/net/fs_enet/fs_enet-main.c index 2bc2d2b2064..ec2f5034457 100644 --- a/drivers/net/fs_enet/fs_enet-main.c +++ b/drivers/net/fs_enet/fs_enet-main.c @@ -1110,6 +1110,7 @@ static struct of_device_id fs_enet_match[] = { #endif {} }; +MODULE_DEVICE_TABLE(of, fs_enet_match); static struct of_platform_driver fs_enet_driver = { .name = "fs_enet", diff --git a/drivers/net/fs_enet/mii-bitbang.c b/drivers/net/fs_enet/mii-bitbang.c index 93b481b0e3c..24ff9f43a62 100644 --- a/drivers/net/fs_enet/mii-bitbang.c +++ b/drivers/net/fs_enet/mii-bitbang.c @@ -221,6 +221,7 @@ static struct of_device_id fs_enet_mdio_bb_match[] = { }, {}, }; +MODULE_DEVICE_TABLE(of, fs_enet_mdio_bb_match); static struct of_platform_driver fs_enet_bb_mdio_driver = { .name = "fsl-bb-mdio", diff --git a/drivers/net/fs_enet/mii-fec.c 
b/drivers/net/fs_enet/mii-fec.c index a2d69c1cd07..96eba4280c5 100644 --- a/drivers/net/fs_enet/mii-fec.c +++ b/drivers/net/fs_enet/mii-fec.c @@ -219,6 +219,7 @@ static struct of_device_id fs_enet_mdio_fec_match[] = { #endif {}, }; +MODULE_DEVICE_TABLE(of, fs_enet_mdio_fec_match); static struct of_platform_driver fs_enet_fec_mdio_driver = { .name = "fsl-fec-mdio", diff --git a/drivers/net/fsl_pq_mdio.c b/drivers/net/fsl_pq_mdio.c index d167090248e..6ac46486697 100644 --- a/drivers/net/fsl_pq_mdio.c +++ b/drivers/net/fsl_pq_mdio.c @@ -407,6 +407,7 @@ static struct of_device_id fsl_pq_mdio_match[] = { }, {}, }; +MODULE_DEVICE_TABLE(of, fsl_pq_mdio_match); static struct of_platform_driver fsl_pq_mdio_driver = { .name = "fsl-pq_mdio", diff --git a/drivers/net/gianfar.c b/drivers/net/gianfar.c index 1e5289ffef6..5bf31f1509c 100644 --- a/drivers/net/gianfar.c +++ b/drivers/net/gianfar.c @@ -2325,9 +2325,6 @@ static irqreturn_t gfar_error(int irq, void *dev_id) return IRQ_HANDLED; } -/* work with hotplug and coldplug */ -MODULE_ALIAS("platform:fsl-gianfar"); - static struct of_device_id gfar_match[] = { { @@ -2336,6 +2333,7 @@ static struct of_device_id gfar_match[] = }, {}, }; +MODULE_DEVICE_TABLE(of, gfar_match); /* Structure for a device driver */ static struct of_platform_driver gfar_driver = { diff --git a/drivers/net/ibm_newemac/core.c b/drivers/net/ibm_newemac/core.c index 89c82c5e63e..3fae8755979 100644 --- a/drivers/net/ibm_newemac/core.c +++ b/drivers/net/ibm_newemac/core.c @@ -24,6 +24,7 @@ * */ +#include <linux/module.h> #include <linux/sched.h> #include <linux/string.h> #include <linux/errno.h> @@ -443,7 +444,7 @@ static u32 __emac_calc_base_mr1(struct emac_instance *dev, int tx_size, int rx_s ret |= EMAC_MR1_TFS_2K; break; default: - printk(KERN_WARNING "%s: Unknown Rx FIFO size %d\n", + printk(KERN_WARNING "%s: Unknown Tx FIFO size %d\n", dev->ndev->name, tx_size); } @@ -470,6 +471,9 @@ static u32 __emac4_calc_base_mr1(struct emac_instance *dev, int tx_size, int rx_ DBG2(dev, "__emac4_calc_base_mr1" NL); switch(tx_size) { + case 16384: + ret |= EMAC4_MR1_TFS_16K; + break; case 4096: ret |= EMAC4_MR1_TFS_4K; break; @@ -477,7 +481,7 @@ static u32 __emac4_calc_base_mr1(struct emac_instance *dev, int tx_size, int rx_ ret |= EMAC4_MR1_TFS_2K; break; default: - printk(KERN_WARNING "%s: Unknown Rx FIFO size %d\n", + printk(KERN_WARNING "%s: Unknown Tx FIFO size %d\n", dev->ndev->name, tx_size); } @@ -2985,6 +2989,7 @@ static struct of_device_id emac_match[] = }, {}, }; +MODULE_DEVICE_TABLE(of, emac_match); static struct of_platform_driver emac_driver = { .name = "emac", diff --git a/drivers/net/ibm_newemac/emac.h b/drivers/net/ibm_newemac/emac.h index 0afc2cf5c52..d34adf99fc6 100644 --- a/drivers/net/ibm_newemac/emac.h +++ b/drivers/net/ibm_newemac/emac.h @@ -153,6 +153,7 @@ struct emac_regs { #define EMAC4_MR1_RFS_16K 0x00280000 #define EMAC4_MR1_TFS_2K 0x00020000 #define EMAC4_MR1_TFS_4K 0x00030000 +#define EMAC4_MR1_TFS_16K 0x00050000 #define EMAC4_MR1_TR 0x00008000 #define EMAC4_MR1_MWSW_001 0x00001000 #define EMAC4_MR1_JPSM 0x00000800 diff --git a/drivers/net/irda/sa1100_ir.c b/drivers/net/irda/sa1100_ir.c index 38bf7cf2256..c412e802617 100644 --- a/drivers/net/irda/sa1100_ir.c +++ b/drivers/net/irda/sa1100_ir.c @@ -232,8 +232,11 @@ static int sa1100_irda_startup(struct sa1100_irda *si) /* * Ensure that the ports for this device are setup correctly. 
*/ - if (si->pdata->startup) - si->pdata->startup(si->dev); + if (si->pdata->startup) { + ret = si->pdata->startup(si->dev); + if (ret) + return ret; + } /* * Configure PPC for IRDA - we want to drive TXD2 low. diff --git a/drivers/net/ixp2000/enp2611.c b/drivers/net/ixp2000/enp2611.c index b02a981c87a..34a6cfd1793 100644 --- a/drivers/net/ixp2000/enp2611.c +++ b/drivers/net/ixp2000/enp2611.c @@ -119,24 +119,9 @@ static struct ixp2400_msf_parameters enp2611_msf_parameters = } }; -struct enp2611_ixpdev_priv -{ - struct ixpdev_priv ixpdev_priv; - struct net_device_stats stats; -}; - static struct net_device *nds[3]; static struct timer_list link_check_timer; -static struct net_device_stats *enp2611_get_stats(struct net_device *dev) -{ - struct enp2611_ixpdev_priv *ip = netdev_priv(dev); - - pm3386_get_stats(ip->ixpdev_priv.channel, &(ip->stats)); - - return &(ip->stats); -} - /* @@@ Poll the SFP moddef0 line too. */ /* @@@ Try to use the pm3386 DOOL interrupt as well. */ static void enp2611_check_link_status(unsigned long __dummy) @@ -203,14 +188,13 @@ static int __init enp2611_init_module(void) ports = pm3386_port_count(); for (i = 0; i < ports; i++) { - nds[i] = ixpdev_alloc(i, sizeof(struct enp2611_ixpdev_priv)); + nds[i] = ixpdev_alloc(i, sizeof(struct ixpdev_priv)); if (nds[i] == NULL) { while (--i >= 0) free_netdev(nds[i]); return -ENOMEM; } - nds[i]->get_stats = enp2611_get_stats; pm3386_init_port(i); pm3386_get_mac(i, nds[i]->dev_addr); } diff --git a/drivers/net/ixp2000/ixpdev.c b/drivers/net/ixp2000/ixpdev.c index 127243461a5..9aee0cc922c 100644 --- a/drivers/net/ixp2000/ixpdev.c +++ b/drivers/net/ixp2000/ixpdev.c @@ -21,6 +21,7 @@ #include "ixp2400_tx.ucode" #include "ixpdev_priv.h" #include "ixpdev.h" +#include "pm3386.h" #define DRV_MODULE_VERSION "0.2" @@ -271,6 +272,15 @@ static int ixpdev_close(struct net_device *dev) return 0; } +static struct net_device_stats *ixpdev_get_stats(struct net_device *dev) +{ + struct ixpdev_priv *ip = netdev_priv(dev); + + pm3386_get_stats(ip->channel, &(dev->stats)); + + return &(dev->stats); +} + static const struct net_device_ops ixpdev_netdev_ops = { .ndo_open = ixpdev_open, .ndo_stop = ixpdev_close, @@ -278,6 +288,7 @@ static const struct net_device_ops ixpdev_netdev_ops = { .ndo_change_mtu = eth_change_mtu, .ndo_validate_addr = eth_validate_addr, .ndo_set_mac_address = eth_mac_addr, + .ndo_get_stats = ixpdev_get_stats, #ifdef CONFIG_NET_POLL_CONTROLLER .ndo_poll_controller = ixpdev_poll_controller, #endif diff --git a/drivers/net/netxen/netxen_nic_main.c b/drivers/net/netxen/netxen_nic_main.c index 9b9eab10770..7fc15e9e8ad 100644 --- a/drivers/net/netxen/netxen_nic_main.c +++ b/drivers/net/netxen/netxen_nic_main.c @@ -595,7 +595,8 @@ netxen_setup_pci_map(struct netxen_adapter *adapter) void __iomem *mem_ptr2 = NULL; void __iomem *db_ptr = NULL; - unsigned long mem_base, mem_len, db_base, db_len = 0, pci_len0 = 0; + resource_size_t mem_base, db_base; + unsigned long mem_len, db_len = 0, pci_len0 = 0; struct pci_dev *pdev = adapter->pdev; int pci_func = adapter->ahw.pci_func; diff --git a/drivers/net/pcmcia/3c574_cs.c b/drivers/net/pcmcia/3c574_cs.c index ee8ad3e180d..b58965a2b3a 100644 --- a/drivers/net/pcmcia/3c574_cs.c +++ b/drivers/net/pcmcia/3c574_cs.c @@ -251,6 +251,7 @@ static void el3_tx_timeout(struct net_device *dev); static int el3_ioctl(struct net_device *dev, struct ifreq *rq, int cmd); static const struct ethtool_ops netdev_ethtool_ops; static void set_rx_mode(struct net_device *dev); +static void set_multicast_list(struct 
net_device *dev); static void tc574_detach(struct pcmcia_device *p_dev); @@ -266,7 +267,7 @@ static const struct net_device_ops el3_netdev_ops = { .ndo_tx_timeout = el3_tx_timeout, .ndo_get_stats = el3_get_stats, .ndo_do_ioctl = el3_ioctl, - .ndo_set_multicast_list = set_rx_mode, + .ndo_set_multicast_list = set_multicast_list, .ndo_change_mtu = eth_change_mtu, .ndo_set_mac_address = eth_mac_addr, .ndo_validate_addr = eth_validate_addr, @@ -1161,6 +1162,16 @@ static void set_rx_mode(struct net_device *dev) outw(SetRxFilter | RxStation | RxBroadcast, ioaddr + EL3_CMD); } +static void set_multicast_list(struct net_device *dev) +{ + struct el3_private *lp = netdev_priv(dev); + unsigned long flags; + + spin_lock_irqsave(&lp->window_lock, flags); + set_rx_mode(dev); + spin_unlock_irqrestore(&lp->window_lock, flags); +} + static int el3_close(struct net_device *dev) { unsigned int ioaddr = dev->base_addr; diff --git a/drivers/net/phy/mdio-gpio.c b/drivers/net/phy/mdio-gpio.c index 250e10f2c35..8659d341e76 100644 --- a/drivers/net/phy/mdio-gpio.c +++ b/drivers/net/phy/mdio-gpio.c @@ -238,6 +238,7 @@ static struct of_device_id mdio_ofgpio_match[] = { }, {}, }; +MODULE_DEVICE_TABLE(of, mdio_ofgpio_match); static struct of_platform_driver mdio_ofgpio_driver = { .name = "mdio-gpio", diff --git a/drivers/net/qlge/qlge.h b/drivers/net/qlge/qlge.h index 3ec6e85587a..e7285f01bd0 100644 --- a/drivers/net/qlge/qlge.h +++ b/drivers/net/qlge/qlge.h @@ -803,6 +803,12 @@ enum { MB_CMD_SET_PORT_CFG = 0x00000122, MB_CMD_GET_PORT_CFG = 0x00000123, MB_CMD_GET_LINK_STS = 0x00000124, + MB_CMD_SET_MGMNT_TFK_CTL = 0x00000160, /* Set Mgmnt Traffic Control */ + MB_SET_MPI_TFK_STOP = (1 << 0), + MB_SET_MPI_TFK_RESUME = (1 << 1), + MB_CMD_GET_MGMNT_TFK_CTL = 0x00000161, /* Get Mgmnt Traffic Control */ + MB_GET_MPI_TFK_STOPPED = (1 << 0), + MB_GET_MPI_TFK_FIFO_EMPTY = (1 << 1), /* Mailbox Command Status. */ MB_CMD_STS_GOOD = 0x00004000, /* Success. 
*/ @@ -1168,7 +1174,7 @@ struct ricb { #define RSS_RI6 0x40 #define RSS_RT6 0x80 __le16 mask; - __le32 hash_cq_id[256]; + u8 hash_cq_id[1024]; __le32 ipv6_hash_key[10]; __le32 ipv4_hash_key[4]; } __attribute((packed)); @@ -1606,6 +1612,8 @@ int ql_read_mpi_reg(struct ql_adapter *qdev, u32 reg, u32 *data); int ql_mb_about_fw(struct ql_adapter *qdev); void ql_link_on(struct ql_adapter *qdev); void ql_link_off(struct ql_adapter *qdev); +int ql_mb_set_mgmnt_traffic_ctl(struct ql_adapter *qdev, u32 control); +int ql_wait_fifo_empty(struct ql_adapter *qdev); #if 1 #define QL_ALL_DUMP diff --git a/drivers/net/qlge/qlge_main.c b/drivers/net/qlge/qlge_main.c index 61680715cde..48b45df85ec 100644 --- a/drivers/net/qlge/qlge_main.c +++ b/drivers/net/qlge/qlge_main.c @@ -320,6 +320,37 @@ static int ql_set_mac_addr_reg(struct ql_adapter *qdev, u8 *addr, u32 type, switch (type) { case MAC_ADDR_TYPE_MULTI_MAC: + { + u32 upper = (addr[0] << 8) | addr[1]; + u32 lower = (addr[2] << 24) | (addr[3] << 16) | + (addr[4] << 8) | (addr[5]); + + status = + ql_wait_reg_rdy(qdev, + MAC_ADDR_IDX, MAC_ADDR_MW, 0); + if (status) + goto exit; + ql_write32(qdev, MAC_ADDR_IDX, (offset++) | + (index << MAC_ADDR_IDX_SHIFT) | + type | MAC_ADDR_E); + ql_write32(qdev, MAC_ADDR_DATA, lower); + status = + ql_wait_reg_rdy(qdev, + MAC_ADDR_IDX, MAC_ADDR_MW, 0); + if (status) + goto exit; + ql_write32(qdev, MAC_ADDR_IDX, (offset++) | + (index << MAC_ADDR_IDX_SHIFT) | + type | MAC_ADDR_E); + + ql_write32(qdev, MAC_ADDR_DATA, upper); + status = + ql_wait_reg_rdy(qdev, + MAC_ADDR_IDX, MAC_ADDR_MW, 0); + if (status) + goto exit; + break; + } case MAC_ADDR_TYPE_CAM_MAC: { u32 cam_output; @@ -365,16 +396,14 @@ static int ql_set_mac_addr_reg(struct ql_adapter *qdev, u8 *addr, u32 type, and possibly the function id. Right now we hardcode the route field to NIC core. */ - if (type == MAC_ADDR_TYPE_CAM_MAC) { - cam_output = (CAM_OUT_ROUTE_NIC | - (qdev-> - func << CAM_OUT_FUNC_SHIFT) | - (0 << CAM_OUT_CQ_ID_SHIFT)); - if (qdev->vlgrp) - cam_output |= CAM_OUT_RV; - /* route to NIC core */ - ql_write32(qdev, MAC_ADDR_DATA, cam_output); - } + cam_output = (CAM_OUT_ROUTE_NIC | + (qdev-> + func << CAM_OUT_FUNC_SHIFT) | + (0 << CAM_OUT_CQ_ID_SHIFT)); + if (qdev->vlgrp) + cam_output |= CAM_OUT_RV; + /* route to NIC core */ + ql_write32(qdev, MAC_ADDR_DATA, cam_output); break; } case MAC_ADDR_TYPE_VLAN: @@ -546,14 +575,14 @@ static int ql_set_routing_reg(struct ql_adapter *qdev, u32 index, u32 mask, } case RT_IDX_MCAST: /* Pass up All Multicast frames. */ { - value = RT_IDX_DST_CAM_Q | /* dest */ + value = RT_IDX_DST_DFLT_Q | /* dest */ RT_IDX_TYPE_NICQ | /* type */ (RT_IDX_ALLMULTI_SLOT << RT_IDX_IDX_SHIFT);/* index */ break; } case RT_IDX_MCAST_MATCH: /* Pass up matched Multicast frames. 
*/ { - value = RT_IDX_DST_CAM_Q | /* dest */ + value = RT_IDX_DST_DFLT_Q | /* dest */ RT_IDX_TYPE_NICQ | /* type */ (RT_IDX_MCAST_MATCH_SLOT << RT_IDX_IDX_SHIFT);/* index */ break; @@ -3077,6 +3106,12 @@ err_irq: static int ql_start_rss(struct ql_adapter *qdev) { + u8 init_hash_seed[] = {0x6d, 0x5a, 0x56, 0xda, 0x25, 0x5b, 0x0e, 0xc2, + 0x41, 0x67, 0x25, 0x3d, 0x43, 0xa3, 0x8f, + 0xb0, 0xd0, 0xca, 0x2b, 0xcb, 0xae, 0x7b, + 0x30, 0xb4, 0x77, 0xcb, 0x2d, 0xa3, 0x80, + 0x30, 0xf2, 0x0c, 0x6a, 0x42, 0xb7, 0x3b, + 0xbe, 0xac, 0x01, 0xfa}; struct ricb *ricb = &qdev->ricb; int status = 0; int i; @@ -3086,21 +3121,17 @@ static int ql_start_rss(struct ql_adapter *qdev) ricb->base_cq = RSS_L4K; ricb->flags = - (RSS_L6K | RSS_LI | RSS_LB | RSS_LM | RSS_RI4 | RSS_RI6 | RSS_RT4 | - RSS_RT6); - ricb->mask = cpu_to_le16(qdev->rss_ring_count - 1); + (RSS_L6K | RSS_LI | RSS_LB | RSS_LM | RSS_RT4 | RSS_RT6); + ricb->mask = cpu_to_le16((u16)(0x3ff)); /* * Fill out the Indirection Table. */ - for (i = 0; i < 256; i++) - hash_id[i] = i & (qdev->rss_ring_count - 1); + for (i = 0; i < 1024; i++) + hash_id[i] = (i & (qdev->rss_ring_count - 1)); - /* - * Random values for the IPv6 and IPv4 Hash Keys. - */ - get_random_bytes((void *)&ricb->ipv6_hash_key[0], 40); - get_random_bytes((void *)&ricb->ipv4_hash_key[0], 16); + memcpy((void *)&ricb->ipv6_hash_key[0], init_hash_seed, 40); + memcpy((void *)&ricb->ipv4_hash_key[0], init_hash_seed, 16); QPRINTK(qdev, IFUP, DEBUG, "Initializing RSS.\n"); @@ -3239,6 +3270,13 @@ static int ql_adapter_initialize(struct ql_adapter *qdev) ql_write32(qdev, SPLT_HDR, SPLT_HDR_EP | min(SMALL_BUFFER_SIZE, MAX_SPLIT_SIZE)); + /* Set RX packet routing to use port/pci function on which the + * packet arrived on in addition to usual frame routing. + * This is helpful on bonding where both interfaces can have + * the same MAC address. + */ + ql_write32(qdev, RST_FO, RST_FO_RR_MASK | RST_FO_RR_RCV_FUNC_CQ); + /* Start up the rx queues. */ for (i = 0; i < qdev->rx_ring_count; i++) { status = ql_start_rx_ring(qdev, &qdev->rx_ring[i]); @@ -3311,6 +3349,13 @@ static int ql_adapter_reset(struct ql_adapter *qdev) end_jiffies = jiffies + max((unsigned long)1, usecs_to_jiffies(30)); + + /* Stop management traffic. */ + ql_mb_set_mgmnt_traffic_ctl(qdev, MB_SET_MPI_TFK_STOP); + + /* Wait for the NIC and MGMNT FIFOs to empty. */ + ql_wait_fifo_empty(qdev); + ql_write32(qdev, RST_FO, (RST_FO_FR << 16) | RST_FO_FR); do { @@ -3326,6 +3371,8 @@ static int ql_adapter_reset(struct ql_adapter *qdev) status = -ETIMEDOUT; } + /* Resume management traffic. */ + ql_mb_set_mgmnt_traffic_ctl(qdev, MB_SET_MPI_TFK_RESUME); return status; } @@ -3704,6 +3751,12 @@ static void ql_asic_reset_work(struct work_struct *work) status = ql_adapter_up(qdev); if (status) goto error; + + /* Restore rx mode. 
*/ + clear_bit(QL_ALLMULTI, &qdev->flags); + clear_bit(QL_PROMISCUOUS, &qdev->flags); + qlge_set_multicast_list(qdev->ndev); + rtnl_unlock(); return; error: diff --git a/drivers/net/qlge/qlge_mpi.c b/drivers/net/qlge/qlge_mpi.c index c2e43073047..99e58e3f8e2 100644 --- a/drivers/net/qlge/qlge_mpi.c +++ b/drivers/net/qlge/qlge_mpi.c @@ -768,6 +768,95 @@ static int ql_idc_wait(struct ql_adapter *qdev) return status; } +int ql_mb_set_mgmnt_traffic_ctl(struct ql_adapter *qdev, u32 control) +{ + struct mbox_params mbc; + struct mbox_params *mbcp = &mbc; + int status; + + memset(mbcp, 0, sizeof(struct mbox_params)); + + mbcp->in_count = 1; + mbcp->out_count = 2; + + mbcp->mbox_in[0] = MB_CMD_SET_MGMNT_TFK_CTL; + mbcp->mbox_in[1] = control; + + status = ql_mailbox_command(qdev, mbcp); + if (status) + return status; + + if (mbcp->mbox_out[0] == MB_CMD_STS_GOOD) + return status; + + if (mbcp->mbox_out[0] == MB_CMD_STS_INVLD_CMD) { + QPRINTK(qdev, DRV, ERR, + "Command not supported by firmware.\n"); + status = -EINVAL; + } else if (mbcp->mbox_out[0] == MB_CMD_STS_ERR) { + /* This indicates that the firmware is + * already in the state we are trying to + * change it to. + */ + QPRINTK(qdev, DRV, ERR, + "Command parameters make no change.\n"); + } + return status; +} + +/* Returns a negative error code or the mailbox command status. */ +static int ql_mb_get_mgmnt_traffic_ctl(struct ql_adapter *qdev, u32 *control) +{ + struct mbox_params mbc; + struct mbox_params *mbcp = &mbc; + int status; + + memset(mbcp, 0, sizeof(struct mbox_params)); + *control = 0; + + mbcp->in_count = 1; + mbcp->out_count = 1; + + mbcp->mbox_in[0] = MB_CMD_GET_MGMNT_TFK_CTL; + + status = ql_mailbox_command(qdev, mbcp); + if (status) + return status; + + if (mbcp->mbox_out[0] == MB_CMD_STS_GOOD) { + *control = mbcp->mbox_in[1]; + return status; + } + + if (mbcp->mbox_out[0] == MB_CMD_STS_INVLD_CMD) { + QPRINTK(qdev, DRV, ERR, + "Command not supported by firmware.\n"); + status = -EINVAL; + } else if (mbcp->mbox_out[0] == MB_CMD_STS_ERR) { + QPRINTK(qdev, DRV, ERR, + "Failed to get MPI traffic control.\n"); + status = -EIO; + } + return status; +} + +int ql_wait_fifo_empty(struct ql_adapter *qdev) +{ + int count = 5; + u32 mgmnt_fifo_empty; + u32 nic_fifo_empty; + + do { + nic_fifo_empty = ql_read32(qdev, STS) & STS_NFE; + ql_mb_get_mgmnt_traffic_ctl(qdev, &mgmnt_fifo_empty); + mgmnt_fifo_empty &= MB_GET_MPI_TFK_FIFO_EMPTY; + if (nic_fifo_empty && mgmnt_fifo_empty) + return 0; + msleep(100); + } while (count-- > 0); + return -ETIMEDOUT; +} + /* API called in work thread context to set new TX/RX * maximum frame size values to match MTU. 
*/ @@ -876,6 +965,8 @@ void ql_mpi_work(struct work_struct *work) int err = 0; rtnl_lock(); + /* Begin polled mode for MPI */ + ql_write32(qdev, INTR_MASK, (INTR_MASK_PI << 16)); while (ql_read32(qdev, STS) & STS_PI) { memset(mbcp, 0, sizeof(struct mbox_params)); @@ -888,6 +979,8 @@ void ql_mpi_work(struct work_struct *work) break; } + /* End polled mode for MPI */ + ql_write32(qdev, INTR_MASK, (INTR_MASK_PI << 16) | INTR_MASK_PI); rtnl_unlock(); ql_enable_completion_interrupt(qdev, 0); } diff --git a/drivers/net/r8169.c b/drivers/net/r8169.c index 50c6a3cfe43..83c47d95c3a 100644 --- a/drivers/net/r8169.c +++ b/drivers/net/r8169.c @@ -115,7 +115,9 @@ enum mac_version { RTL_GIGA_MAC_VER_22 = 0x16, // 8168C RTL_GIGA_MAC_VER_23 = 0x17, // 8168CP RTL_GIGA_MAC_VER_24 = 0x18, // 8168CP - RTL_GIGA_MAC_VER_25 = 0x19 // 8168D + RTL_GIGA_MAC_VER_25 = 0x19, // 8168D + RTL_GIGA_MAC_VER_26 = 0x1a, // 8168D + RTL_GIGA_MAC_VER_27 = 0x1b // 8168DP }; #define _R(NAME,MAC,MASK) \ @@ -150,7 +152,9 @@ static const struct { _R("RTL8168c/8111c", RTL_GIGA_MAC_VER_22, 0xff7e1880), // PCI-E _R("RTL8168cp/8111cp", RTL_GIGA_MAC_VER_23, 0xff7e1880), // PCI-E _R("RTL8168cp/8111cp", RTL_GIGA_MAC_VER_24, 0xff7e1880), // PCI-E - _R("RTL8168d/8111d", RTL_GIGA_MAC_VER_25, 0xff7e1880) // PCI-E + _R("RTL8168d/8111d", RTL_GIGA_MAC_VER_25, 0xff7e1880), // PCI-E + _R("RTL8168d/8111d", RTL_GIGA_MAC_VER_26, 0xff7e1880), // PCI-E + _R("RTL8168dp/8111dp", RTL_GIGA_MAC_VER_27, 0xff7e1880) // PCI-E }; #undef _R @@ -253,6 +257,13 @@ enum rtl8168_8101_registers { DBG_REG = 0xd1, #define FIX_NAK_1 (1 << 4) #define FIX_NAK_2 (1 << 3) + EFUSEAR = 0xdc, +#define EFUSEAR_FLAG 0x80000000 +#define EFUSEAR_WRITE_CMD 0x80000000 +#define EFUSEAR_READ_CMD 0x00000000 +#define EFUSEAR_REG_MASK 0x03ff +#define EFUSEAR_REG_SHIFT 8 +#define EFUSEAR_DATA_MASK 0xff }; enum rtl_register_content { @@ -568,6 +579,14 @@ static void mdio_patch(void __iomem *ioaddr, int reg_addr, int value) mdio_write(ioaddr, reg_addr, mdio_read(ioaddr, reg_addr) | value); } +static void mdio_plus_minus(void __iomem *ioaddr, int reg_addr, int p, int m) +{ + int val; + + val = mdio_read(ioaddr, reg_addr); + mdio_write(ioaddr, reg_addr, (val | p) & ~m); +} + static void rtl_mdio_write(struct net_device *dev, int phy_id, int location, int val) { @@ -651,6 +670,24 @@ static u32 rtl_csi_read(void __iomem *ioaddr, int addr) return value; } +static u8 rtl8168d_efuse_read(void __iomem *ioaddr, int reg_addr) +{ + u8 value = 0xff; + unsigned int i; + + RTL_W32(EFUSEAR, (reg_addr & EFUSEAR_REG_MASK) << EFUSEAR_REG_SHIFT); + + for (i = 0; i < 300; i++) { + if (RTL_R32(EFUSEAR) & EFUSEAR_FLAG) { + value = RTL_R32(EFUSEAR) & EFUSEAR_DATA_MASK; + break; + } + udelay(100); + } + + return value; +} + static void rtl8169_irq_mask_and_ack(void __iomem *ioaddr) { RTL_W16(IntrMask, 0x0000); @@ -1243,7 +1280,10 @@ static void rtl8169_get_mac_version(struct rtl8169_private *tp, int mac_version; } mac_info[] = { /* 8168D family. */ - { 0x7c800000, 0x28000000, RTL_GIGA_MAC_VER_25 }, + { 0x7cf00000, 0x28300000, RTL_GIGA_MAC_VER_26 }, + { 0x7cf00000, 0x28100000, RTL_GIGA_MAC_VER_25 }, + { 0x7c800000, 0x28800000, RTL_GIGA_MAC_VER_27 }, + { 0x7c800000, 0x28000000, RTL_GIGA_MAC_VER_26 }, /* 8168C family. 
*/ { 0x7cf00000, 0x3ca00000, RTL_GIGA_MAC_VER_24 }, @@ -1648,74 +1688,903 @@ static void rtl8168c_4_hw_phy_config(void __iomem *ioaddr) rtl8168c_3_hw_phy_config(ioaddr); } -static void rtl8168d_hw_phy_config(void __iomem *ioaddr) +static void rtl8168d_1_hw_phy_config(void __iomem *ioaddr) { - struct phy_reg phy_reg_init_0[] = { + static struct phy_reg phy_reg_init_0[] = { { 0x1f, 0x0001 }, - { 0x09, 0x2770 }, - { 0x08, 0x04d0 }, - { 0x0b, 0xad15 }, - { 0x0c, 0x5bf0 }, - { 0x1c, 0xf101 }, + { 0x06, 0x4064 }, + { 0x07, 0x2863 }, + { 0x08, 0x059c }, + { 0x09, 0x26b4 }, + { 0x0a, 0x6a19 }, + { 0x0b, 0xdcc8 }, + { 0x10, 0xf06d }, + { 0x14, 0x7f68 }, + { 0x18, 0x7fd9 }, + { 0x1c, 0xf0ff }, + { 0x1d, 0x3d9c }, { 0x1f, 0x0003 }, - { 0x14, 0x94d7 }, - { 0x12, 0xf4d6 }, - { 0x09, 0xca0f }, - { 0x1f, 0x0002 }, - { 0x0b, 0x0b10 }, - { 0x0c, 0xd1f7 }, - { 0x1f, 0x0002 }, - { 0x06, 0x5461 }, + { 0x12, 0xf49f }, + { 0x13, 0x070b }, + { 0x1a, 0x05ad }, + { 0x14, 0x94c0 } + }; + static struct phy_reg phy_reg_init_1[] = { { 0x1f, 0x0002 }, - { 0x05, 0x6662 }, + { 0x06, 0x5561 }, + { 0x1f, 0x0005 }, + { 0x05, 0x8332 }, + { 0x06, 0x5561 } + }; + static struct phy_reg phy_reg_init_2[] = { + { 0x1f, 0x0005 }, + { 0x05, 0xffc2 }, + { 0x1f, 0x0005 }, + { 0x05, 0x8000 }, + { 0x06, 0xf8f9 }, + { 0x06, 0xfaef }, + { 0x06, 0x59ee }, + { 0x06, 0xf8ea }, + { 0x06, 0x00ee }, + { 0x06, 0xf8eb }, + { 0x06, 0x00e0 }, + { 0x06, 0xf87c }, + { 0x06, 0xe1f8 }, + { 0x06, 0x7d59 }, + { 0x06, 0x0fef }, + { 0x06, 0x0139 }, + { 0x06, 0x029e }, + { 0x06, 0x06ef }, + { 0x06, 0x1039 }, + { 0x06, 0x089f }, + { 0x06, 0x2aee }, + { 0x06, 0xf8ea }, + { 0x06, 0x00ee }, + { 0x06, 0xf8eb }, + { 0x06, 0x01e0 }, + { 0x06, 0xf87c }, + { 0x06, 0xe1f8 }, + { 0x06, 0x7d58 }, + { 0x06, 0x409e }, + { 0x06, 0x0f39 }, + { 0x06, 0x46aa }, + { 0x06, 0x0bbf }, + { 0x06, 0x8290 }, + { 0x06, 0xd682 }, + { 0x06, 0x9802 }, + { 0x06, 0x014f }, + { 0x06, 0xae09 }, + { 0x06, 0xbf82 }, + { 0x06, 0x98d6 }, + { 0x06, 0x82a0 }, + { 0x06, 0x0201 }, + { 0x06, 0x4fef }, + { 0x06, 0x95fe }, + { 0x06, 0xfdfc }, + { 0x06, 0x05f8 }, + { 0x06, 0xf9fa }, + { 0x06, 0xeef8 }, + { 0x06, 0xea00 }, + { 0x06, 0xeef8 }, + { 0x06, 0xeb00 }, + { 0x06, 0xe2f8 }, + { 0x06, 0x7ce3 }, + { 0x06, 0xf87d }, + { 0x06, 0xa511 }, + { 0x06, 0x1112 }, + { 0x06, 0xd240 }, + { 0x06, 0xd644 }, + { 0x06, 0x4402 }, + { 0x06, 0x8217 }, + { 0x06, 0xd2a0 }, + { 0x06, 0xd6aa }, + { 0x06, 0xaa02 }, + { 0x06, 0x8217 }, + { 0x06, 0xae0f }, + { 0x06, 0xa544 }, + { 0x06, 0x4402 }, + { 0x06, 0xae4d }, + { 0x06, 0xa5aa }, + { 0x06, 0xaa02 }, + { 0x06, 0xae47 }, + { 0x06, 0xaf82 }, + { 0x06, 0x13ee }, + { 0x06, 0x834e }, + { 0x06, 0x00ee }, + { 0x06, 0x834d }, + { 0x06, 0x0fee }, + { 0x06, 0x834c }, + { 0x06, 0x0fee }, + { 0x06, 0x834f }, + { 0x06, 0x00ee }, + { 0x06, 0x8351 }, + { 0x06, 0x00ee }, + { 0x06, 0x834a }, + { 0x06, 0xffee }, + { 0x06, 0x834b }, + { 0x06, 0xffe0 }, + { 0x06, 0x8330 }, + { 0x06, 0xe183 }, + { 0x06, 0x3158 }, + { 0x06, 0xfee4 }, + { 0x06, 0xf88a }, + { 0x06, 0xe5f8 }, + { 0x06, 0x8be0 }, + { 0x06, 0x8332 }, + { 0x06, 0xe183 }, + { 0x06, 0x3359 }, + { 0x06, 0x0fe2 }, + { 0x06, 0x834d }, + { 0x06, 0x0c24 }, + { 0x06, 0x5af0 }, + { 0x06, 0x1e12 }, + { 0x06, 0xe4f8 }, + { 0x06, 0x8ce5 }, + { 0x06, 0xf88d }, + { 0x06, 0xaf82 }, + { 0x06, 0x13e0 }, + { 0x06, 0x834f }, + { 0x06, 0x10e4 }, + { 0x06, 0x834f }, + { 0x06, 0xe083 }, + { 0x06, 0x4e78 }, + { 0x06, 0x009f }, + { 0x06, 0x0ae0 }, + { 0x06, 0x834f }, + { 0x06, 0xa010 }, + { 0x06, 0xa5ee }, + { 0x06, 0x834e }, + { 0x06, 0x01e0 }, + { 
0x06, 0x834e }, + { 0x06, 0x7805 }, + { 0x06, 0x9e9a }, + { 0x06, 0xe083 }, + { 0x06, 0x4e78 }, + { 0x06, 0x049e }, + { 0x06, 0x10e0 }, + { 0x06, 0x834e }, + { 0x06, 0x7803 }, + { 0x06, 0x9e0f }, + { 0x06, 0xe083 }, + { 0x06, 0x4e78 }, + { 0x06, 0x019e }, + { 0x06, 0x05ae }, + { 0x06, 0x0caf }, + { 0x06, 0x81f8 }, + { 0x06, 0xaf81 }, + { 0x06, 0xa3af }, + { 0x06, 0x81dc }, + { 0x06, 0xaf82 }, + { 0x06, 0x13ee }, + { 0x06, 0x8348 }, + { 0x06, 0x00ee }, + { 0x06, 0x8349 }, + { 0x06, 0x00e0 }, + { 0x06, 0x8351 }, + { 0x06, 0x10e4 }, + { 0x06, 0x8351 }, + { 0x06, 0x5801 }, + { 0x06, 0x9fea }, + { 0x06, 0xd000 }, + { 0x06, 0xd180 }, + { 0x06, 0x1f66 }, + { 0x06, 0xe2f8 }, + { 0x06, 0xeae3 }, + { 0x06, 0xf8eb }, + { 0x06, 0x5af8 }, + { 0x06, 0x1e20 }, + { 0x06, 0xe6f8 }, + { 0x06, 0xeae5 }, + { 0x06, 0xf8eb }, + { 0x06, 0xd302 }, + { 0x06, 0xb3fe }, + { 0x06, 0xe2f8 }, + { 0x06, 0x7cef }, + { 0x06, 0x325b }, + { 0x06, 0x80e3 }, + { 0x06, 0xf87d }, + { 0x06, 0x9e03 }, + { 0x06, 0x7dff }, + { 0x06, 0xff0d }, + { 0x06, 0x581c }, + { 0x06, 0x551a }, + { 0x06, 0x6511 }, + { 0x06, 0xa190 }, + { 0x06, 0xd3e2 }, + { 0x06, 0x8348 }, + { 0x06, 0xe383 }, + { 0x06, 0x491b }, + { 0x06, 0x56ab }, + { 0x06, 0x08ef }, + { 0x06, 0x56e6 }, + { 0x06, 0x8348 }, + { 0x06, 0xe783 }, + { 0x06, 0x4910 }, + { 0x06, 0xd180 }, + { 0x06, 0x1f66 }, + { 0x06, 0xa004 }, + { 0x06, 0xb9e2 }, + { 0x06, 0x8348 }, + { 0x06, 0xe383 }, + { 0x06, 0x49ef }, + { 0x06, 0x65e2 }, + { 0x06, 0x834a }, + { 0x06, 0xe383 }, + { 0x06, 0x4b1b }, + { 0x06, 0x56aa }, + { 0x06, 0x0eef }, + { 0x06, 0x56e6 }, + { 0x06, 0x834a }, + { 0x06, 0xe783 }, + { 0x06, 0x4be2 }, + { 0x06, 0x834d }, + { 0x06, 0xe683 }, + { 0x06, 0x4ce0 }, + { 0x06, 0x834d }, + { 0x06, 0xa000 }, + { 0x06, 0x0caf }, + { 0x06, 0x81dc }, + { 0x06, 0xe083 }, + { 0x06, 0x4d10 }, + { 0x06, 0xe483 }, + { 0x06, 0x4dae }, + { 0x06, 0x0480 }, + { 0x06, 0xe483 }, + { 0x06, 0x4de0 }, + { 0x06, 0x834e }, + { 0x06, 0x7803 }, + { 0x06, 0x9e0b }, + { 0x06, 0xe083 }, + { 0x06, 0x4e78 }, + { 0x06, 0x049e }, + { 0x06, 0x04ee }, + { 0x06, 0x834e }, + { 0x06, 0x02e0 }, + { 0x06, 0x8332 }, + { 0x06, 0xe183 }, + { 0x06, 0x3359 }, + { 0x06, 0x0fe2 }, + { 0x06, 0x834d }, + { 0x06, 0x0c24 }, + { 0x06, 0x5af0 }, + { 0x06, 0x1e12 }, + { 0x06, 0xe4f8 }, + { 0x06, 0x8ce5 }, + { 0x06, 0xf88d }, + { 0x06, 0xe083 }, + { 0x06, 0x30e1 }, + { 0x06, 0x8331 }, + { 0x06, 0x6801 }, + { 0x06, 0xe4f8 }, + { 0x06, 0x8ae5 }, + { 0x06, 0xf88b }, + { 0x06, 0xae37 }, + { 0x06, 0xee83 }, + { 0x06, 0x4e03 }, + { 0x06, 0xe083 }, + { 0x06, 0x4ce1 }, + { 0x06, 0x834d }, + { 0x06, 0x1b01 }, + { 0x06, 0x9e04 }, + { 0x06, 0xaaa1 }, + { 0x06, 0xaea8 }, + { 0x06, 0xee83 }, + { 0x06, 0x4e04 }, + { 0x06, 0xee83 }, + { 0x06, 0x4f00 }, + { 0x06, 0xaeab }, + { 0x06, 0xe083 }, + { 0x06, 0x4f78 }, + { 0x06, 0x039f }, + { 0x06, 0x14ee }, + { 0x06, 0x834e }, + { 0x06, 0x05d2 }, + { 0x06, 0x40d6 }, + { 0x06, 0x5554 }, + { 0x06, 0x0282 }, + { 0x06, 0x17d2 }, + { 0x06, 0xa0d6 }, + { 0x06, 0xba00 }, + { 0x06, 0x0282 }, + { 0x06, 0x17fe }, + { 0x06, 0xfdfc }, + { 0x06, 0x05f8 }, + { 0x06, 0xe0f8 }, + { 0x06, 0x60e1 }, + { 0x06, 0xf861 }, + { 0x06, 0x6802 }, + { 0x06, 0xe4f8 }, + { 0x06, 0x60e5 }, + { 0x06, 0xf861 }, + { 0x06, 0xe0f8 }, + { 0x06, 0x48e1 }, + { 0x06, 0xf849 }, + { 0x06, 0x580f }, + { 0x06, 0x1e02 }, + { 0x06, 0xe4f8 }, + { 0x06, 0x48e5 }, + { 0x06, 0xf849 }, + { 0x06, 0xd000 }, + { 0x06, 0x0282 }, + { 0x06, 0x5bbf }, + { 0x06, 0x8350 }, + { 0x06, 0xef46 }, + { 0x06, 0xdc19 }, + { 0x06, 0xddd0 }, + { 0x06, 0x0102 }, + { 0x06, 0x825b 
}, + { 0x06, 0x0282 }, + { 0x06, 0x77e0 }, + { 0x06, 0xf860 }, + { 0x06, 0xe1f8 }, + { 0x06, 0x6158 }, + { 0x06, 0xfde4 }, + { 0x06, 0xf860 }, + { 0x06, 0xe5f8 }, + { 0x06, 0x61fc }, + { 0x06, 0x04f9 }, + { 0x06, 0xfafb }, + { 0x06, 0xc6bf }, + { 0x06, 0xf840 }, + { 0x06, 0xbe83 }, + { 0x06, 0x50a0 }, + { 0x06, 0x0101 }, + { 0x06, 0x071b }, + { 0x06, 0x89cf }, + { 0x06, 0xd208 }, + { 0x06, 0xebdb }, + { 0x06, 0x19b2 }, + { 0x06, 0xfbff }, + { 0x06, 0xfefd }, + { 0x06, 0x04f8 }, + { 0x06, 0xe0f8 }, + { 0x06, 0x48e1 }, + { 0x06, 0xf849 }, + { 0x06, 0x6808 }, + { 0x06, 0xe4f8 }, + { 0x06, 0x48e5 }, + { 0x06, 0xf849 }, + { 0x06, 0x58f7 }, + { 0x06, 0xe4f8 }, + { 0x06, 0x48e5 }, + { 0x06, 0xf849 }, + { 0x06, 0xfc04 }, + { 0x06, 0x4d20 }, + { 0x06, 0x0002 }, + { 0x06, 0x4e22 }, + { 0x06, 0x0002 }, + { 0x06, 0x4ddf }, + { 0x06, 0xff01 }, + { 0x06, 0x4edd }, + { 0x06, 0xff01 }, + { 0x05, 0x83d4 }, + { 0x06, 0x8000 }, + { 0x05, 0x83d8 }, + { 0x06, 0x8051 }, + { 0x02, 0x6010 }, + { 0x03, 0xdc00 }, + { 0x05, 0xfff6 }, + { 0x06, 0x00fc }, { 0x1f, 0x0000 }, - { 0x14, 0x0060 }, + { 0x1f, 0x0000 }, - { 0x0d, 0xf8a0 }, + { 0x0d, 0xf880 }, + { 0x1f, 0x0000 } + }; + + rtl_phy_write(ioaddr, phy_reg_init_0, ARRAY_SIZE(phy_reg_init_0)); + + mdio_write(ioaddr, 0x1f, 0x0002); + mdio_plus_minus(ioaddr, 0x0b, 0x0010, 0x00ef); + mdio_plus_minus(ioaddr, 0x0c, 0xa200, 0x5d00); + + rtl_phy_write(ioaddr, phy_reg_init_1, ARRAY_SIZE(phy_reg_init_1)); + + if (rtl8168d_efuse_read(ioaddr, 0x01) == 0xb1) { + struct phy_reg phy_reg_init[] = { + { 0x1f, 0x0002 }, + { 0x05, 0x669a }, + { 0x1f, 0x0005 }, + { 0x05, 0x8330 }, + { 0x06, 0x669a }, + { 0x1f, 0x0002 } + }; + int val; + + rtl_phy_write(ioaddr, phy_reg_init, ARRAY_SIZE(phy_reg_init)); + + val = mdio_read(ioaddr, 0x0d); + + if ((val & 0x00ff) != 0x006c) { + u32 set[] = { + 0x0065, 0x0066, 0x0067, 0x0068, + 0x0069, 0x006a, 0x006b, 0x006c + }; + int i; + + mdio_write(ioaddr, 0x1f, 0x0002); + + val &= 0xff00; + for (i = 0; i < ARRAY_SIZE(set); i++) + mdio_write(ioaddr, 0x0d, val | set[i]); + } + } else { + struct phy_reg phy_reg_init[] = { + { 0x1f, 0x0002 }, + { 0x05, 0x6662 }, + { 0x1f, 0x0005 }, + { 0x05, 0x8330 }, + { 0x06, 0x6662 } + }; + + rtl_phy_write(ioaddr, phy_reg_init, ARRAY_SIZE(phy_reg_init)); + } + + mdio_write(ioaddr, 0x1f, 0x0002); + mdio_patch(ioaddr, 0x0d, 0x0300); + mdio_patch(ioaddr, 0x0f, 0x0010); + + mdio_write(ioaddr, 0x1f, 0x0002); + mdio_plus_minus(ioaddr, 0x02, 0x0100, 0x0600); + mdio_plus_minus(ioaddr, 0x03, 0x0000, 0xe000); + + rtl_phy_write(ioaddr, phy_reg_init_2, ARRAY_SIZE(phy_reg_init_2)); +} + +static void rtl8168d_2_hw_phy_config(void __iomem *ioaddr) +{ + static struct phy_reg phy_reg_init_0[] = { + { 0x1f, 0x0001 }, + { 0x06, 0x4064 }, + { 0x07, 0x2863 }, + { 0x08, 0x059c }, + { 0x09, 0x26b4 }, + { 0x0a, 0x6a19 }, + { 0x0b, 0xdcc8 }, + { 0x10, 0xf06d }, + { 0x14, 0x7f68 }, + { 0x18, 0x7fd9 }, + { 0x1c, 0xf0ff }, + { 0x1d, 0x3d9c }, + { 0x1f, 0x0003 }, + { 0x12, 0xf49f }, + { 0x13, 0x070b }, + { 0x1a, 0x05ad }, + { 0x14, 0x94c0 }, + + { 0x1f, 0x0002 }, + { 0x06, 0x5561 }, + { 0x1f, 0x0005 }, + { 0x05, 0x8332 }, + { 0x06, 0x5561 } + }; + static struct phy_reg phy_reg_init_1[] = { + { 0x1f, 0x0005 }, + { 0x05, 0xffc2 }, { 0x1f, 0x0005 }, - { 0x05, 0xffc2 } + { 0x05, 0x8000 }, + { 0x06, 0xf8f9 }, + { 0x06, 0xfaee }, + { 0x06, 0xf8ea }, + { 0x06, 0x00ee }, + { 0x06, 0xf8eb }, + { 0x06, 0x00e2 }, + { 0x06, 0xf87c }, + { 0x06, 0xe3f8 }, + { 0x06, 0x7da5 }, + { 0x06, 0x1111 }, + { 0x06, 0x12d2 }, + { 0x06, 0x40d6 }, + { 0x06, 0x4444 }, + { 0x06, 
0x0281 }, + { 0x06, 0xc6d2 }, + { 0x06, 0xa0d6 }, + { 0x06, 0xaaaa }, + { 0x06, 0x0281 }, + { 0x06, 0xc6ae }, + { 0x06, 0x0fa5 }, + { 0x06, 0x4444 }, + { 0x06, 0x02ae }, + { 0x06, 0x4da5 }, + { 0x06, 0xaaaa }, + { 0x06, 0x02ae }, + { 0x06, 0x47af }, + { 0x06, 0x81c2 }, + { 0x06, 0xee83 }, + { 0x06, 0x4e00 }, + { 0x06, 0xee83 }, + { 0x06, 0x4d0f }, + { 0x06, 0xee83 }, + { 0x06, 0x4c0f }, + { 0x06, 0xee83 }, + { 0x06, 0x4f00 }, + { 0x06, 0xee83 }, + { 0x06, 0x5100 }, + { 0x06, 0xee83 }, + { 0x06, 0x4aff }, + { 0x06, 0xee83 }, + { 0x06, 0x4bff }, + { 0x06, 0xe083 }, + { 0x06, 0x30e1 }, + { 0x06, 0x8331 }, + { 0x06, 0x58fe }, + { 0x06, 0xe4f8 }, + { 0x06, 0x8ae5 }, + { 0x06, 0xf88b }, + { 0x06, 0xe083 }, + { 0x06, 0x32e1 }, + { 0x06, 0x8333 }, + { 0x06, 0x590f }, + { 0x06, 0xe283 }, + { 0x06, 0x4d0c }, + { 0x06, 0x245a }, + { 0x06, 0xf01e }, + { 0x06, 0x12e4 }, + { 0x06, 0xf88c }, + { 0x06, 0xe5f8 }, + { 0x06, 0x8daf }, + { 0x06, 0x81c2 }, + { 0x06, 0xe083 }, + { 0x06, 0x4f10 }, + { 0x06, 0xe483 }, + { 0x06, 0x4fe0 }, + { 0x06, 0x834e }, + { 0x06, 0x7800 }, + { 0x06, 0x9f0a }, + { 0x06, 0xe083 }, + { 0x06, 0x4fa0 }, + { 0x06, 0x10a5 }, + { 0x06, 0xee83 }, + { 0x06, 0x4e01 }, + { 0x06, 0xe083 }, + { 0x06, 0x4e78 }, + { 0x06, 0x059e }, + { 0x06, 0x9ae0 }, + { 0x06, 0x834e }, + { 0x06, 0x7804 }, + { 0x06, 0x9e10 }, + { 0x06, 0xe083 }, + { 0x06, 0x4e78 }, + { 0x06, 0x039e }, + { 0x06, 0x0fe0 }, + { 0x06, 0x834e }, + { 0x06, 0x7801 }, + { 0x06, 0x9e05 }, + { 0x06, 0xae0c }, + { 0x06, 0xaf81 }, + { 0x06, 0xa7af }, + { 0x06, 0x8152 }, + { 0x06, 0xaf81 }, + { 0x06, 0x8baf }, + { 0x06, 0x81c2 }, + { 0x06, 0xee83 }, + { 0x06, 0x4800 }, + { 0x06, 0xee83 }, + { 0x06, 0x4900 }, + { 0x06, 0xe083 }, + { 0x06, 0x5110 }, + { 0x06, 0xe483 }, + { 0x06, 0x5158 }, + { 0x06, 0x019f }, + { 0x06, 0xead0 }, + { 0x06, 0x00d1 }, + { 0x06, 0x801f }, + { 0x06, 0x66e2 }, + { 0x06, 0xf8ea }, + { 0x06, 0xe3f8 }, + { 0x06, 0xeb5a }, + { 0x06, 0xf81e }, + { 0x06, 0x20e6 }, + { 0x06, 0xf8ea }, + { 0x06, 0xe5f8 }, + { 0x06, 0xebd3 }, + { 0x06, 0x02b3 }, + { 0x06, 0xfee2 }, + { 0x06, 0xf87c }, + { 0x06, 0xef32 }, + { 0x06, 0x5b80 }, + { 0x06, 0xe3f8 }, + { 0x06, 0x7d9e }, + { 0x06, 0x037d }, + { 0x06, 0xffff }, + { 0x06, 0x0d58 }, + { 0x06, 0x1c55 }, + { 0x06, 0x1a65 }, + { 0x06, 0x11a1 }, + { 0x06, 0x90d3 }, + { 0x06, 0xe283 }, + { 0x06, 0x48e3 }, + { 0x06, 0x8349 }, + { 0x06, 0x1b56 }, + { 0x06, 0xab08 }, + { 0x06, 0xef56 }, + { 0x06, 0xe683 }, + { 0x06, 0x48e7 }, + { 0x06, 0x8349 }, + { 0x06, 0x10d1 }, + { 0x06, 0x801f }, + { 0x06, 0x66a0 }, + { 0x06, 0x04b9 }, + { 0x06, 0xe283 }, + { 0x06, 0x48e3 }, + { 0x06, 0x8349 }, + { 0x06, 0xef65 }, + { 0x06, 0xe283 }, + { 0x06, 0x4ae3 }, + { 0x06, 0x834b }, + { 0x06, 0x1b56 }, + { 0x06, 0xaa0e }, + { 0x06, 0xef56 }, + { 0x06, 0xe683 }, + { 0x06, 0x4ae7 }, + { 0x06, 0x834b }, + { 0x06, 0xe283 }, + { 0x06, 0x4de6 }, + { 0x06, 0x834c }, + { 0x06, 0xe083 }, + { 0x06, 0x4da0 }, + { 0x06, 0x000c }, + { 0x06, 0xaf81 }, + { 0x06, 0x8be0 }, + { 0x06, 0x834d }, + { 0x06, 0x10e4 }, + { 0x06, 0x834d }, + { 0x06, 0xae04 }, + { 0x06, 0x80e4 }, + { 0x06, 0x834d }, + { 0x06, 0xe083 }, + { 0x06, 0x4e78 }, + { 0x06, 0x039e }, + { 0x06, 0x0be0 }, + { 0x06, 0x834e }, + { 0x06, 0x7804 }, + { 0x06, 0x9e04 }, + { 0x06, 0xee83 }, + { 0x06, 0x4e02 }, + { 0x06, 0xe083 }, + { 0x06, 0x32e1 }, + { 0x06, 0x8333 }, + { 0x06, 0x590f }, + { 0x06, 0xe283 }, + { 0x06, 0x4d0c }, + { 0x06, 0x245a }, + { 0x06, 0xf01e }, + { 0x06, 0x12e4 }, + { 0x06, 0xf88c }, + { 0x06, 0xe5f8 }, + { 0x06, 0x8de0 }, + { 0x06, 0x8330 }, + { 
0x06, 0xe183 }, + { 0x06, 0x3168 }, + { 0x06, 0x01e4 }, + { 0x06, 0xf88a }, + { 0x06, 0xe5f8 }, + { 0x06, 0x8bae }, + { 0x06, 0x37ee }, + { 0x06, 0x834e }, + { 0x06, 0x03e0 }, + { 0x06, 0x834c }, + { 0x06, 0xe183 }, + { 0x06, 0x4d1b }, + { 0x06, 0x019e }, + { 0x06, 0x04aa }, + { 0x06, 0xa1ae }, + { 0x06, 0xa8ee }, + { 0x06, 0x834e }, + { 0x06, 0x04ee }, + { 0x06, 0x834f }, + { 0x06, 0x00ae }, + { 0x06, 0xabe0 }, + { 0x06, 0x834f }, + { 0x06, 0x7803 }, + { 0x06, 0x9f14 }, + { 0x06, 0xee83 }, + { 0x06, 0x4e05 }, + { 0x06, 0xd240 }, + { 0x06, 0xd655 }, + { 0x06, 0x5402 }, + { 0x06, 0x81c6 }, + { 0x06, 0xd2a0 }, + { 0x06, 0xd6ba }, + { 0x06, 0x0002 }, + { 0x06, 0x81c6 }, + { 0x06, 0xfefd }, + { 0x06, 0xfc05 }, + { 0x06, 0xf8e0 }, + { 0x06, 0xf860 }, + { 0x06, 0xe1f8 }, + { 0x06, 0x6168 }, + { 0x06, 0x02e4 }, + { 0x06, 0xf860 }, + { 0x06, 0xe5f8 }, + { 0x06, 0x61e0 }, + { 0x06, 0xf848 }, + { 0x06, 0xe1f8 }, + { 0x06, 0x4958 }, + { 0x06, 0x0f1e }, + { 0x06, 0x02e4 }, + { 0x06, 0xf848 }, + { 0x06, 0xe5f8 }, + { 0x06, 0x49d0 }, + { 0x06, 0x0002 }, + { 0x06, 0x820a }, + { 0x06, 0xbf83 }, + { 0x06, 0x50ef }, + { 0x06, 0x46dc }, + { 0x06, 0x19dd }, + { 0x06, 0xd001 }, + { 0x06, 0x0282 }, + { 0x06, 0x0a02 }, + { 0x06, 0x8226 }, + { 0x06, 0xe0f8 }, + { 0x06, 0x60e1 }, + { 0x06, 0xf861 }, + { 0x06, 0x58fd }, + { 0x06, 0xe4f8 }, + { 0x06, 0x60e5 }, + { 0x06, 0xf861 }, + { 0x06, 0xfc04 }, + { 0x06, 0xf9fa }, + { 0x06, 0xfbc6 }, + { 0x06, 0xbff8 }, + { 0x06, 0x40be }, + { 0x06, 0x8350 }, + { 0x06, 0xa001 }, + { 0x06, 0x0107 }, + { 0x06, 0x1b89 }, + { 0x06, 0xcfd2 }, + { 0x06, 0x08eb }, + { 0x06, 0xdb19 }, + { 0x06, 0xb2fb }, + { 0x06, 0xfffe }, + { 0x06, 0xfd04 }, + { 0x06, 0xf8e0 }, + { 0x06, 0xf848 }, + { 0x06, 0xe1f8 }, + { 0x06, 0x4968 }, + { 0x06, 0x08e4 }, + { 0x06, 0xf848 }, + { 0x06, 0xe5f8 }, + { 0x06, 0x4958 }, + { 0x06, 0xf7e4 }, + { 0x06, 0xf848 }, + { 0x06, 0xe5f8 }, + { 0x06, 0x49fc }, + { 0x06, 0x044d }, + { 0x06, 0x2000 }, + { 0x06, 0x024e }, + { 0x06, 0x2200 }, + { 0x06, 0x024d }, + { 0x06, 0xdfff }, + { 0x06, 0x014e }, + { 0x06, 0xddff }, + { 0x06, 0x0100 }, + { 0x05, 0x83d8 }, + { 0x06, 0x8000 }, + { 0x03, 0xdc00 }, + { 0x05, 0xfff6 }, + { 0x06, 0x00fc }, + { 0x1f, 0x0000 }, + + { 0x1f, 0x0000 }, + { 0x0d, 0xf880 }, + { 0x1f, 0x0000 } }; rtl_phy_write(ioaddr, phy_reg_init_0, ARRAY_SIZE(phy_reg_init_0)); - if (mdio_read(ioaddr, 0x06) == 0xc400) { - struct phy_reg phy_reg_init_1[] = { + if (rtl8168d_efuse_read(ioaddr, 0x01) == 0xb1) { + struct phy_reg phy_reg_init[] = { + { 0x1f, 0x0002 }, + { 0x05, 0x669a }, { 0x1f, 0x0005 }, - { 0x01, 0x0300 }, - { 0x1f, 0x0000 }, - { 0x11, 0x401c }, - { 0x16, 0x4100 }, + { 0x05, 0x8330 }, + { 0x06, 0x669a }, + + { 0x1f, 0x0002 } + }; + int val; + + rtl_phy_write(ioaddr, phy_reg_init, ARRAY_SIZE(phy_reg_init)); + + val = mdio_read(ioaddr, 0x0d); + if ((val & 0x00ff) != 0x006c) { + u32 set[] = { + 0x0065, 0x0066, 0x0067, 0x0068, + 0x0069, 0x006a, 0x006b, 0x006c + }; + int i; + + mdio_write(ioaddr, 0x1f, 0x0002); + + val &= 0xff00; + for (i = 0; i < ARRAY_SIZE(set); i++) + mdio_write(ioaddr, 0x0d, val | set[i]); + } + } else { + struct phy_reg phy_reg_init[] = { + { 0x1f, 0x0002 }, + { 0x05, 0x2642 }, { 0x1f, 0x0005 }, - { 0x07, 0x0010 }, - { 0x05, 0x83dc }, - { 0x06, 0x087d }, - { 0x05, 0x8300 }, - { 0x06, 0x0101 }, - { 0x06, 0x05f8 }, - { 0x06, 0xf9fa }, - { 0x06, 0xfbef }, - { 0x06, 0x79e2 }, - { 0x06, 0x835f }, - { 0x06, 0xe0f8 }, - { 0x06, 0x9ae1 }, - { 0x06, 0xf89b }, - { 0x06, 0xef31 }, - { 0x06, 0x3b65 }, - { 0x06, 0xaa07 }, - { 0x06, 0x81e4 }, - { 
0x06, 0xf89a }, - { 0x06, 0xe5f8 }, - { 0x06, 0x9baf }, - { 0x06, 0x06ae }, - { 0x05, 0x83dc }, - { 0x06, 0x8300 }, + { 0x05, 0x8330 }, + { 0x06, 0x2642 } }; - rtl_phy_write(ioaddr, phy_reg_init_1, - ARRAY_SIZE(phy_reg_init_1)); + rtl_phy_write(ioaddr, phy_reg_init, ARRAY_SIZE(phy_reg_init)); } - mdio_write(ioaddr, 0x1f, 0x0000); + mdio_write(ioaddr, 0x1f, 0x0002); + mdio_plus_minus(ioaddr, 0x02, 0x0100, 0x0600); + mdio_plus_minus(ioaddr, 0x03, 0x0000, 0xe000); + + mdio_write(ioaddr, 0x1f, 0x0001); + mdio_write(ioaddr, 0x17, 0x0cc0); + + mdio_write(ioaddr, 0x1f, 0x0002); + mdio_patch(ioaddr, 0x0f, 0x0017); + + rtl_phy_write(ioaddr, phy_reg_init_1, ARRAY_SIZE(phy_reg_init_1)); +} + +static void rtl8168d_3_hw_phy_config(void __iomem *ioaddr) +{ + struct phy_reg phy_reg_init[] = { + { 0x1f, 0x0002 }, + { 0x10, 0x0008 }, + { 0x0d, 0x006c }, + + { 0x1f, 0x0000 }, + { 0x0d, 0xf880 }, + + { 0x1f, 0x0001 }, + { 0x17, 0x0cc0 }, + + { 0x1f, 0x0001 }, + { 0x0b, 0xa4d8 }, + { 0x09, 0x281c }, + { 0x07, 0x2883 }, + { 0x0a, 0x6b35 }, + { 0x1d, 0x3da4 }, + { 0x1c, 0xeffd }, + { 0x14, 0x7f52 }, + { 0x18, 0x7fc6 }, + { 0x08, 0x0601 }, + { 0x06, 0x4063 }, + { 0x10, 0xf074 }, + { 0x1f, 0x0003 }, + { 0x13, 0x0789 }, + { 0x12, 0xf4bd }, + { 0x1a, 0x04fd }, + { 0x14, 0x84b0 }, + { 0x1f, 0x0000 }, + { 0x00, 0x9200 }, + + { 0x1f, 0x0005 }, + { 0x01, 0x0340 }, + { 0x1f, 0x0001 }, + { 0x04, 0x4000 }, + { 0x03, 0x1d21 }, + { 0x02, 0x0c32 }, + { 0x01, 0x0200 }, + { 0x00, 0x5554 }, + { 0x04, 0x4800 }, + { 0x04, 0x4000 }, + { 0x04, 0xf000 }, + { 0x03, 0xdf01 }, + { 0x02, 0xdf20 }, + { 0x01, 0x101a }, + { 0x00, 0xa0ff }, + { 0x04, 0xf800 }, + { 0x04, 0xf000 }, + { 0x1f, 0x0000 }, + + { 0x1f, 0x0007 }, + { 0x1e, 0x0023 }, + { 0x16, 0x0000 }, + { 0x1f, 0x0000 } + }; + + rtl_phy_write(ioaddr, phy_reg_init, ARRAY_SIZE(phy_reg_init)); } static void rtl8102e_hw_phy_config(void __iomem *ioaddr) @@ -1792,7 +2661,13 @@ static void rtl_hw_phy_config(struct net_device *dev) rtl8168cp_2_hw_phy_config(ioaddr); break; case RTL_GIGA_MAC_VER_25: - rtl8168d_hw_phy_config(ioaddr); + rtl8168d_1_hw_phy_config(ioaddr); + break; + case RTL_GIGA_MAC_VER_26: + rtl8168d_2_hw_phy_config(ioaddr); + break; + case RTL_GIGA_MAC_VER_27: + rtl8168d_3_hw_phy_config(ioaddr); break; default: @@ -2863,6 +3738,8 @@ static void rtl_hw_start_8168(struct net_device *dev) break; case RTL_GIGA_MAC_VER_25: + case RTL_GIGA_MAC_VER_26: + case RTL_GIGA_MAC_VER_27: rtl_hw_start_8168d(ioaddr, pdev); break; diff --git a/drivers/net/stmmac/Kconfig b/drivers/net/stmmac/Kconfig new file mode 100644 index 00000000000..35eaa5251d7 --- /dev/null +++ b/drivers/net/stmmac/Kconfig @@ -0,0 +1,53 @@ +config STMMAC_ETH + tristate "STMicroelectronics 10/100/1000 Ethernet driver" + select MII + select PHYLIB + depends on NETDEVICES && CPU_SUBTYPE_ST40 + help + This is the driver for the ST MAC 10/100/1000 on-chip Ethernet + controllers. ST Ethernet IPs are built around a Synopsys IP Core. + +if STMMAC_ETH + +config STMMAC_DA + bool "STMMAC DMA arbitration scheme" + default n + help + Selecting this option, rx has priority over Tx (only for Giga + Ethernet device). + By default, the DMA arbitration scheme is based on Round-robin + (rx:tx priority is 1:1). + +config STMMAC_DUAL_MAC + bool "STMMAC: dual mac support (EXPERIMENTAL)" + default n + depends on EXPERIMENTAL && STMMAC_ETH && !STMMAC_TIMER + help + Some ST SoCs (for example the stx7141 and stx7200c2) have two + Ethernet Controllers. This option turns on the second Ethernet + device on this kind of platforms. 
+ +config STMMAC_TIMER + bool "STMMAC Timer optimisation" + default n + help + Use an external timer for mitigating the number of network + interrupts. + +choice + prompt "Select Timer device" + depends on STMMAC_TIMER + +config STMMAC_TMU_TIMER + bool "TMU channel 2" + depends on CPU_SH4 + help + +config STMMAC_RTC_TIMER + bool "Real time clock" + depends on RTC_CLASS + help + +endchoice + +endif diff --git a/drivers/net/stmmac/Makefile b/drivers/net/stmmac/Makefile new file mode 100644 index 00000000000..b2d7a5564df --- /dev/null +++ b/drivers/net/stmmac/Makefile @@ -0,0 +1,4 @@ +obj-$(CONFIG_STMMAC_ETH) += stmmac.o +stmmac-$(CONFIG_STMMAC_TIMER) += stmmac_timer.o +stmmac-objs:= stmmac_main.o stmmac_ethtool.o stmmac_mdio.o \ + mac100.o gmac.o $(stmmac-y) diff --git a/drivers/net/stmmac/common.h b/drivers/net/stmmac/common.h new file mode 100644 index 00000000000..e49e5188e88 --- /dev/null +++ b/drivers/net/stmmac/common.h @@ -0,0 +1,330 @@ +/******************************************************************************* + STMMAC Common Header File + + Copyright (C) 2007-2009 STMicroelectronics Ltd + + This program is free software; you can redistribute it and/or modify it + under the terms and conditions of the GNU General Public License, + version 2, as published by the Free Software Foundation. + + This program is distributed in the hope it will be useful, but WITHOUT + ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + more details. + + You should have received a copy of the GNU General Public License along with + this program; if not, write to the Free Software Foundation, Inc., + 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + + The full GNU General Public License is included in this distribution in + the file called "COPYING". 
+ + Author: Giuseppe Cavallaro <peppe.cavallaro@st.com> +*******************************************************************************/ + +#include "descs.h" +#include <linux/io.h> + +/* ********************************************* + DMA CRS Control and Status Register Mapping + * *********************************************/ +#define DMA_BUS_MODE 0x00001000 /* Bus Mode */ +#define DMA_XMT_POLL_DEMAND 0x00001004 /* Transmit Poll Demand */ +#define DMA_RCV_POLL_DEMAND 0x00001008 /* Received Poll Demand */ +#define DMA_RCV_BASE_ADDR 0x0000100c /* Receive List Base */ +#define DMA_TX_BASE_ADDR 0x00001010 /* Transmit List Base */ +#define DMA_STATUS 0x00001014 /* Status Register */ +#define DMA_CONTROL 0x00001018 /* Ctrl (Operational Mode) */ +#define DMA_INTR_ENA 0x0000101c /* Interrupt Enable */ +#define DMA_MISSED_FRAME_CTR 0x00001020 /* Missed Frame Counter */ +#define DMA_CUR_TX_BUF_ADDR 0x00001050 /* Current Host Tx Buffer */ +#define DMA_CUR_RX_BUF_ADDR 0x00001054 /* Current Host Rx Buffer */ + +/* ******************************** + DMA Control register defines + * ********************************/ +#define DMA_CONTROL_ST 0x00002000 /* Start/Stop Transmission */ +#define DMA_CONTROL_SR 0x00000002 /* Start/Stop Receive */ + +/* ************************************** + DMA Interrupt Enable register defines + * **************************************/ +/**** NORMAL INTERRUPT ****/ +#define DMA_INTR_ENA_NIE 0x00010000 /* Normal Summary */ +#define DMA_INTR_ENA_TIE 0x00000001 /* Transmit Interrupt */ +#define DMA_INTR_ENA_TUE 0x00000004 /* Transmit Buffer Unavailable */ +#define DMA_INTR_ENA_RIE 0x00000040 /* Receive Interrupt */ +#define DMA_INTR_ENA_ERE 0x00004000 /* Early Receive */ + +#define DMA_INTR_NORMAL (DMA_INTR_ENA_NIE | DMA_INTR_ENA_RIE | \ + DMA_INTR_ENA_TIE) + +/**** ABNORMAL INTERRUPT ****/ +#define DMA_INTR_ENA_AIE 0x00008000 /* Abnormal Summary */ +#define DMA_INTR_ENA_FBE 0x00002000 /* Fatal Bus Error */ +#define DMA_INTR_ENA_ETE 0x00000400 /* Early Transmit */ +#define DMA_INTR_ENA_RWE 0x00000200 /* Receive Watchdog */ +#define DMA_INTR_ENA_RSE 0x00000100 /* Receive Stopped */ +#define DMA_INTR_ENA_RUE 0x00000080 /* Receive Buffer Unavailable */ +#define DMA_INTR_ENA_UNE 0x00000020 /* Tx Underflow */ +#define DMA_INTR_ENA_OVE 0x00000010 /* Receive Overflow */ +#define DMA_INTR_ENA_TJE 0x00000008 /* Transmit Jabber */ +#define DMA_INTR_ENA_TSE 0x00000002 /* Transmit Stopped */ + +#define DMA_INTR_ABNORMAL (DMA_INTR_ENA_AIE | DMA_INTR_ENA_FBE | \ + DMA_INTR_ENA_UNE) + +/* DMA default interrupt mask */ +#define DMA_INTR_DEFAULT_MASK (DMA_INTR_NORMAL | DMA_INTR_ABNORMAL) + +/* **************************** + * DMA Status register defines + * ****************************/ +#define DMA_STATUS_GPI 0x10000000 /* PMT interrupt */ +#define DMA_STATUS_GMI 0x08000000 /* MMC interrupt */ +#define DMA_STATUS_GLI 0x04000000 /* GMAC Line interface int. 
*/ +#define DMA_STATUS_EB_MASK 0x00380000 /* Error Bits Mask */ +#define DMA_STATUS_EB_TX_ABORT 0x00080000 /* Error Bits - TX Abort */ +#define DMA_STATUS_EB_RX_ABORT 0x00100000 /* Error Bits - RX Abort */ +#define DMA_STATUS_TS_MASK 0x00700000 /* Transmit Process State */ +#define DMA_STATUS_TS_SHIFT 20 +#define DMA_STATUS_RS_MASK 0x000e0000 /* Receive Process State */ +#define DMA_STATUS_RS_SHIFT 17 +#define DMA_STATUS_NIS 0x00010000 /* Normal Interrupt Summary */ +#define DMA_STATUS_AIS 0x00008000 /* Abnormal Interrupt Summary */ +#define DMA_STATUS_ERI 0x00004000 /* Early Receive Interrupt */ +#define DMA_STATUS_FBI 0x00002000 /* Fatal Bus Error Interrupt */ +#define DMA_STATUS_ETI 0x00000400 /* Early Transmit Interrupt */ +#define DMA_STATUS_RWT 0x00000200 /* Receive Watchdog Timeout */ +#define DMA_STATUS_RPS 0x00000100 /* Receive Process Stopped */ +#define DMA_STATUS_RU 0x00000080 /* Receive Buffer Unavailable */ +#define DMA_STATUS_RI 0x00000040 /* Receive Interrupt */ +#define DMA_STATUS_UNF 0x00000020 /* Transmit Underflow */ +#define DMA_STATUS_OVF 0x00000010 /* Receive Overflow */ +#define DMA_STATUS_TJT 0x00000008 /* Transmit Jabber Timeout */ +#define DMA_STATUS_TU 0x00000004 /* Transmit Buffer Unavailable */ +#define DMA_STATUS_TPS 0x00000002 /* Transmit Process Stopped */ +#define DMA_STATUS_TI 0x00000001 /* Transmit Interrupt */ + +/* Other defines */ +#define HASH_TABLE_SIZE 64 +#define PAUSE_TIME 0x200 + +/* Flow Control defines */ +#define FLOW_OFF 0 +#define FLOW_RX 1 +#define FLOW_TX 2 +#define FLOW_AUTO (FLOW_TX | FLOW_RX) + +/* DMA STORE-AND-FORWARD Operation Mode */ +#define SF_DMA_MODE 1 + +#define HW_CSUM 1 +#define NO_HW_CSUM 0 + +/* GMAC TX FIFO is 8K, Rx FIFO is 16K */ +#define BUF_SIZE_16KiB 16384 +#define BUF_SIZE_8KiB 8192 +#define BUF_SIZE_4KiB 4096 +#define BUF_SIZE_2KiB 2048 + +/* Power Down and WOL */ +#define PMT_NOT_SUPPORTED 0 +#define PMT_SUPPORTED 1 + +/* Common MAC defines */ +#define MAC_CTRL_REG 0x00000000 /* MAC Control */ +#define MAC_ENABLE_TX 0x00000008 /* Transmitter Enable */ +#define MAC_RNABLE_RX 0x00000004 /* Receiver Enable */ + +/* MAC Management Counters register */ +#define MMC_CONTROL 0x00000100 /* MMC Control */ +#define MMC_HIGH_INTR 0x00000104 /* MMC High Interrupt */ +#define MMC_LOW_INTR 0x00000108 /* MMC Low Interrupt */ +#define MMC_HIGH_INTR_MASK 0x0000010c /* MMC High Interrupt Mask */ +#define MMC_LOW_INTR_MASK 0x00000110 /* MMC Low Interrupt Mask */ + +#define MMC_CONTROL_MAX_FRM_MASK 0x0003ff8 /* Maximum Frame Size */ +#define MMC_CONTROL_MAX_FRM_SHIFT 3 +#define MMC_CONTROL_MAX_FRAME 0x7FF + +struct stmmac_extra_stats { + /* Transmit errors */ + unsigned long tx_underflow ____cacheline_aligned; + unsigned long tx_carrier; + unsigned long tx_losscarrier; + unsigned long tx_heartbeat; + unsigned long tx_deferred; + unsigned long tx_vlan; + unsigned long tx_jabber; + unsigned long tx_frame_flushed; + unsigned long tx_payload_error; + unsigned long tx_ip_header_error; + /* Receive errors */ + unsigned long rx_desc; + unsigned long rx_partial; + unsigned long rx_runt; + unsigned long rx_toolong; + unsigned long rx_collision; + unsigned long rx_crc; + unsigned long rx_lenght; + unsigned long rx_mii; + unsigned long rx_multicast; + unsigned long rx_gmac_overflow; + unsigned long rx_watchdog; + unsigned long da_rx_filter_fail; + unsigned long sa_rx_filter_fail; + unsigned long rx_missed_cntr; + unsigned long rx_overflow_cntr; + unsigned long rx_vlan; + /*
Tx/Rx IRQ errors */ + unsigned long tx_undeflow_irq; + unsigned long tx_process_stopped_irq; + unsigned long tx_jabber_irq; + unsigned long rx_overflow_irq; + unsigned long rx_buf_unav_irq; + unsigned long rx_process_stopped_irq; + unsigned long rx_watchdog_irq; + unsigned long tx_early_irq; + unsigned long fatal_bus_error_irq; + /* Extra info */ + unsigned long threshold; + unsigned long tx_pkt_n; + unsigned long rx_pkt_n; + unsigned long poll_n; + unsigned long sched_timer_n; + unsigned long normal_irq_n; +}; + +/* GMAC core can compute the checksums in HW. */ +enum rx_frame_status { + good_frame = 0, + discard_frame = 1, + csum_none = 2, +}; + +static inline void stmmac_set_mac_addr(unsigned long ioaddr, u8 addr[6], + unsigned int high, unsigned int low) +{ + unsigned long data; + + data = (addr[5] << 8) | addr[4]; + writel(data, ioaddr + high); + data = (addr[3] << 24) | (addr[2] << 16) | (addr[1] << 8) | addr[0]; + writel(data, ioaddr + low); + + return; +} + +static inline void stmmac_get_mac_addr(unsigned long ioaddr, + unsigned char *addr, unsigned int high, + unsigned int low) +{ + unsigned int hi_addr, lo_addr; + + /* Read the MAC address from the hardware */ + hi_addr = readl(ioaddr + high); + lo_addr = readl(ioaddr + low); + + /* Extract the MAC address from the high and low words */ + addr[0] = lo_addr & 0xff; + addr[1] = (lo_addr >> 8) & 0xff; + addr[2] = (lo_addr >> 16) & 0xff; + addr[3] = (lo_addr >> 24) & 0xff; + addr[4] = hi_addr & 0xff; + addr[5] = (hi_addr >> 8) & 0xff; + + return; +} + +struct stmmac_ops { + /* MAC core initialization */ + void (*core_init) (unsigned long ioaddr) ____cacheline_aligned; + /* DMA core initialization */ + int (*dma_init) (unsigned long ioaddr, int pbl, u32 dma_tx, u32 dma_rx); + /* Dump MAC registers */ + void (*dump_mac_regs) (unsigned long ioaddr); + /* Dump DMA registers */ + void (*dump_dma_regs) (unsigned long ioaddr); + /* Set tx/rx threshold in the csr6 register + * An invalid value enables the store-and-forward mode */ + void (*dma_mode) (unsigned long ioaddr, int txmode, int rxmode); + /* To track extra statistic (if supported) */ + void (*dma_diagnostic_fr) (void *data, struct stmmac_extra_stats *x, + unsigned long ioaddr); + /* RX descriptor ring initialization */ + void (*init_rx_desc) (struct dma_desc *p, unsigned int ring_size, + int disable_rx_ic); + /* TX descriptor ring initialization */ + void (*init_tx_desc) (struct dma_desc *p, unsigned int ring_size); + + /* Invoked by the xmit function to prepare the tx descriptor */ + void (*prepare_tx_desc) (struct dma_desc *p, int is_fs, int len, + int csum_flag); + /* Set/get the owner of the descriptor */ + void (*set_tx_owner) (struct dma_desc *p); + int (*get_tx_owner) (struct dma_desc *p); + /* Invoked by the xmit function to close the tx descriptor */ + void (*close_tx_desc) (struct dma_desc *p); + /* Clean the tx descriptor as soon as the tx irq is received */ + void (*release_tx_desc) (struct dma_desc *p); + /* Clear interrupt on tx frame completion. 
When this bit is + * set an interrupt happens as soon as the frame is transmitted */ + void (*clear_tx_ic) (struct dma_desc *p); + /* Last tx segment reports the transmit status */ + int (*get_tx_ls) (struct dma_desc *p); + /* Return the transmit status looking at the TDES1 */ + int (*tx_status) (void *data, struct stmmac_extra_stats *x, + struct dma_desc *p, unsigned long ioaddr); + /* Get the buffer size from the descriptor */ + int (*get_tx_len) (struct dma_desc *p); + /* Handle extra events on specific interrupts hw dependent */ + void (*host_irq_status) (unsigned long ioaddr); + int (*get_rx_owner) (struct dma_desc *p); + void (*set_rx_owner) (struct dma_desc *p); + /* Get the receive frame size */ + int (*get_rx_frame_len) (struct dma_desc *p); + /* Return the reception status looking at the RDES1 */ + int (*rx_status) (void *data, struct stmmac_extra_stats *x, + struct dma_desc *p); + /* Multicast filter setting */ + void (*set_filter) (struct net_device *dev); + /* Flow control setting */ + void (*flow_ctrl) (unsigned long ioaddr, unsigned int duplex, + unsigned int fc, unsigned int pause_time); + /* Set power management mode (e.g. magic frame) */ + void (*pmt) (unsigned long ioaddr, unsigned long mode); + /* Set/Get Unicast MAC addresses */ + void (*set_umac_addr) (unsigned long ioaddr, unsigned char *addr, + unsigned int reg_n); + void (*get_umac_addr) (unsigned long ioaddr, unsigned char *addr, + unsigned int reg_n); +}; + +struct mac_link { + int port; + int duplex; + int speed; +}; + +struct mii_regs { + unsigned int addr; /* MII Address */ + unsigned int data; /* MII Data */ +}; + +struct hw_cap { + unsigned int version; /* Core Version register (GMAC) */ + unsigned int pmt; /* Power-Down mode (GMAC) */ + struct mac_link link; + struct mii_regs mii; +}; + +struct mac_device_info { + struct hw_cap hw; + struct stmmac_ops *ops; +}; + +struct mac_device_info *gmac_setup(unsigned long addr); +struct mac_device_info *mac100_setup(unsigned long addr); diff --git a/drivers/net/stmmac/descs.h b/drivers/net/stmmac/descs.h new file mode 100644 index 00000000000..6d2a0b2f5e5 --- /dev/null +++ b/drivers/net/stmmac/descs.h @@ -0,0 +1,163 @@ +/******************************************************************************* + Header File to describe the DMA descriptors + Use enhanced descriptors in case of GMAC Cores. + + This program is free software; you can redistribute it and/or modify it + under the terms and conditions of the GNU General Public License, + version 2, as published by the Free Software Foundation. + + This program is distributed in the hope it will be useful, but WITHOUT + ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + more details. + + You should have received a copy of the GNU General Public License along with + this program; if not, write to the Free Software Foundation, Inc., + 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + + The full GNU General Public License is included in this distribution in + the file called "COPYING". 
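+ + Layout note: each descriptor below occupies four 32-bit words; des01 + packs the two status/control words (DES0/DES1) as bitfields, while des2 + and des3 carry the buffer addresses (des3 presumably holding the next + descriptor address when second_address_chained is set).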
+ + Author: Giuseppe Cavallaro <peppe.cavallaro@st.com> +*******************************************************************************/ +struct dma_desc { + /* Receive descriptor */ + union { + struct { + /* RDES0 */ + u32 reserved1:1; + u32 crc_error:1; + u32 dribbling:1; + u32 mii_error:1; + u32 receive_watchdog:1; + u32 frame_type:1; + u32 collision:1; + u32 frame_too_long:1; + u32 last_descriptor:1; + u32 first_descriptor:1; + u32 multicast_frame:1; + u32 run_frame:1; + u32 length_error:1; + u32 partial_frame_error:1; + u32 descriptor_error:1; + u32 error_summary:1; + u32 frame_length:14; + u32 filtering_fail:1; + u32 own:1; + /* RDES1 */ + u32 buffer1_size:11; + u32 buffer2_size:11; + u32 reserved2:2; + u32 second_address_chained:1; + u32 end_ring:1; + u32 reserved3:5; + u32 disable_ic:1; + } rx; + struct { + /* RDES0 */ + u32 payload_csum_error:1; + u32 crc_error:1; + u32 dribbling:1; + u32 error_gmii:1; + u32 receive_watchdog:1; + u32 frame_type:1; + u32 late_collision:1; + u32 ipc_csum_error:1; + u32 last_descriptor:1; + u32 first_descriptor:1; + u32 vlan_tag:1; + u32 overflow_error:1; + u32 length_error:1; + u32 sa_filter_fail:1; + u32 descriptor_error:1; + u32 error_summary:1; + u32 frame_length:14; + u32 da_filter_fail:1; + u32 own:1; + /* RDES1 */ + u32 buffer1_size:13; + u32 reserved1:1; + u32 second_address_chained:1; + u32 end_ring:1; + u32 buffer2_size:13; + u32 reserved2:2; + u32 disable_ic:1; + } erx; /* -- enhanced -- */ + + /* Transmit descriptor */ + struct { + /* TDES0 */ + u32 deferred:1; + u32 underflow_error:1; + u32 excessive_deferral:1; + u32 collision_count:4; + u32 heartbeat_fail:1; + u32 excessive_collisions:1; + u32 late_collision:1; + u32 no_carrier:1; + u32 loss_carrier:1; + u32 reserved1:3; + u32 error_summary:1; + u32 reserved2:15; + u32 own:1; + /* TDES1 */ + u32 buffer1_size:11; + u32 buffer2_size:11; + u32 reserved3:1; + u32 disable_padding:1; + u32 second_address_chained:1; + u32 end_ring:1; + u32 crc_disable:1; + u32 reserved4:2; + u32 first_segment:1; + u32 last_segment:1; + u32 interrupt:1; + } tx; + struct { + /* TDES0 */ + u32 deferred:1; + u32 underflow_error:1; + u32 excessive_deferral:1; + u32 collision_count:4; + u32 vlan_frame:1; + u32 excessive_collisions:1; + u32 late_collision:1; + u32 no_carrier:1; + u32 loss_carrier:1; + u32 payload_error:1; + u32 frame_flushed:1; + u32 jabber_timeout:1; + u32 error_summary:1; + u32 ip_header_error:1; + u32 time_stamp_status:1; + u32 reserved1:2; + u32 second_address_chained:1; + u32 end_ring:1; + u32 checksum_insertion:2; + u32 reserved2:1; + u32 time_stamp_enable:1; + u32 disable_padding:1; + u32 crc_disable:1; + u32 first_segment:1; + u32 last_segment:1; + u32 interrupt:1; + u32 own:1; + /* TDES1 */ + u32 buffer1_size:13; + u32 reserved3:3; + u32 buffer2_size:13; + u32 reserved4:3; + } etx; /* -- enhanced -- */ + } des01; + unsigned int des2; + unsigned int des3; +}; + +/* Transmit checksum insertion control */ +enum tdes_csum_insertion { + cic_disabled = 0, /* Checksum Insertion Control */ + cic_only_ip = 1, /* Only IP header */ + cic_no_pseudoheader = 2, /* IP header but pseudoheader + * is not calculated */ + cic_full = 3, /* IP header and pseudoheader */ +}; diff --git a/drivers/net/stmmac/gmac.c b/drivers/net/stmmac/gmac.c new file mode 100644 index 00000000000..b624bb5bae0 --- /dev/null +++ b/drivers/net/stmmac/gmac.c @@ -0,0 +1,693 @@ +/******************************************************************************* + This is the driver for the GMAC on-chip Ethernet controller for ST SoCs. 
+ DWC Ether MAC 10/100/1000 Universal version 3.41a has been used for + developing this code. + + Copyright (C) 2007-2009 STMicroelectronics Ltd + + This program is free software; you can redistribute it and/or modify it + under the terms and conditions of the GNU General Public License, + version 2, as published by the Free Software Foundation. + + This program is distributed in the hope it will be useful, but WITHOUT + ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + more details. + + You should have received a copy of the GNU General Public License along with + this program; if not, write to the Free Software Foundation, Inc., + 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + + The full GNU General Public License is included in this distribution in + the file called "COPYING". + + Author: Giuseppe Cavallaro <peppe.cavallaro@st.com> +*******************************************************************************/ + +#include <linux/netdevice.h> +#include <linux/crc32.h> +#include <linux/mii.h> +#include <linux/phy.h> + +#include "stmmac.h" +#include "gmac.h" + +#undef GMAC_DEBUG +/*#define GMAC_DEBUG*/ +#undef FRAME_FILTER_DEBUG +/*#define FRAME_FILTER_DEBUG*/ +#ifdef GMAC_DEBUG +#define DBG(fmt, args...) printk(fmt, ## args) +#else +#define DBG(fmt, args...) do { } while (0) +#endif + +static void gmac_dump_regs(unsigned long ioaddr) +{ + int i; + pr_info("\t----------------------------------------------\n" + "\t GMAC registers (base addr = 0x%8x)\n" + "\t----------------------------------------------\n", + (unsigned int)ioaddr); + + for (i = 0; i < 55; i++) { + int offset = i * 4; + pr_info("\tReg No. %d (offset 0x%x): 0x%08x\n", i, + offset, readl(ioaddr + offset)); + } + return; +} + +static int gmac_dma_init(unsigned long ioaddr, int pbl, u32 dma_tx, u32 dma_rx) +{ + u32 value = readl(ioaddr + DMA_BUS_MODE); + /* DMA SW reset */ + value |= DMA_BUS_MODE_SFT_RESET; + writel(value, ioaddr + DMA_BUS_MODE); + do {} while ((readl(ioaddr + DMA_BUS_MODE) & DMA_BUS_MODE_SFT_RESET)); + + value = /* DMA_BUS_MODE_FB | */ DMA_BUS_MODE_4PBL | + ((pbl << DMA_BUS_MODE_PBL_SHIFT) | + (pbl << DMA_BUS_MODE_RPBL_SHIFT)); + +#ifdef CONFIG_STMMAC_DA + value |= DMA_BUS_MODE_DA; /* Rx has priority over tx */ +#endif + writel(value, ioaddr + DMA_BUS_MODE); + + /* Mask interrupts by writing to CSR7 */ + writel(DMA_INTR_DEFAULT_MASK, ioaddr + DMA_INTR_ENA); + + /* The base address of the RX/TX descriptor lists must be written into + * DMA CSR3 and CSR4, respectively. */ + writel(dma_tx, ioaddr + DMA_TX_BASE_ADDR); + writel(dma_rx, ioaddr + DMA_RCV_BASE_ADDR); + + return 0; +} + +/* Transmit FIFO flush operation */ +static void gmac_flush_tx_fifo(unsigned long ioaddr) +{ + u32 csr6 = readl(ioaddr + DMA_CONTROL); + writel((csr6 | DMA_CONTROL_FTF), ioaddr + DMA_CONTROL); + + do {} while ((readl(ioaddr + DMA_CONTROL) & DMA_CONTROL_FTF)); +} + +static void gmac_dma_operation_mode(unsigned long ioaddr, int txmode, + int rxmode) +{ + u32 csr6 = readl(ioaddr + DMA_CONTROL); + + if (txmode == SF_DMA_MODE) { + DBG(KERN_DEBUG "GMAC: enabling TX store and forward mode\n"); + /* Transmit COE type 2 cannot be done in cut-through mode. 
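+ * The whole frame has to sit in the transmit FIFO before transmission + * starts so that the engine can compute and insert the checksum, + * hence store-and-forward is forced here.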
*/ + csr6 |= DMA_CONTROL_TSF; + /* Operating on the second frame increases performance, + * especially when transmit store-and-forward is used.*/ + csr6 |= DMA_CONTROL_OSF; + } else { + DBG(KERN_DEBUG "GMAC: disabling TX store and forward mode" + " (threshold = %d)\n", txmode); + csr6 &= ~DMA_CONTROL_TSF; + csr6 &= DMA_CONTROL_TC_TX_MASK; + /* Set the transmit threshold */ + if (txmode <= 32) + csr6 |= DMA_CONTROL_TTC_32; + else if (txmode <= 64) + csr6 |= DMA_CONTROL_TTC_64; + else if (txmode <= 128) + csr6 |= DMA_CONTROL_TTC_128; + else if (txmode <= 192) + csr6 |= DMA_CONTROL_TTC_192; + else + csr6 |= DMA_CONTROL_TTC_256; + } + + if (rxmode == SF_DMA_MODE) { + DBG(KERN_DEBUG "GMAC: enabling RX store and forward mode\n"); + csr6 |= DMA_CONTROL_RSF; + } else { + DBG(KERN_DEBUG "GMAC: disabling RX store and forward mode" + " (threshold = %d)\n", rxmode); + csr6 &= ~DMA_CONTROL_RSF; + csr6 &= DMA_CONTROL_TC_RX_MASK; + if (rxmode <= 32) + csr6 |= DMA_CONTROL_RTC_32; + else if (rxmode <= 64) + csr6 |= DMA_CONTROL_RTC_64; + else if (rxmode <= 96) + csr6 |= DMA_CONTROL_RTC_96; + else + csr6 |= DMA_CONTROL_RTC_128; + } + + writel(csr6, ioaddr + DMA_CONTROL); + return; +} + +/* Not yet implemented --- no RMON module */ +static void gmac_dma_diagnostic_fr(void *data, struct stmmac_extra_stats *x, + unsigned long ioaddr) +{ + return; +} + +static void gmac_dump_dma_regs(unsigned long ioaddr) +{ + int i; + pr_info(" DMA registers\n"); + for (i = 0; i < 22; i++) { + if ((i < 9) || (i > 17)) { + int offset = i * 4; + pr_err("\t Reg No. %d (offset 0x%x): 0x%08x\n", i, + (DMA_BUS_MODE + offset), + readl(ioaddr + DMA_BUS_MODE + offset)); + } + } + return; +} + +static int gmac_get_tx_frame_status(void *data, struct stmmac_extra_stats *x, + struct dma_desc *p, unsigned long ioaddr) +{ + int ret = 0; + struct net_device_stats *stats = (struct net_device_stats *)data; + + if (unlikely(p->des01.etx.error_summary)) { + DBG(KERN_ERR "GMAC TX error...
0x%08x\n", p->des01.etx); + if (unlikely(p->des01.etx.jabber_timeout)) { + DBG(KERN_ERR "\tjabber_timeout error\n"); + x->tx_jabber++; + } + + if (unlikely(p->des01.etx.frame_flushed)) { + DBG(KERN_ERR "\tframe_flushed error\n"); + x->tx_frame_flushed++; + gmac_flush_tx_fifo(ioaddr); + } + + if (unlikely(p->des01.etx.loss_carrier)) { + DBG(KERN_ERR "\tloss_carrier error\n"); + x->tx_losscarrier++; + stats->tx_carrier_errors++; + } + if (unlikely(p->des01.etx.no_carrier)) { + DBG(KERN_ERR "\tno_carrier error\n"); + x->tx_carrier++; + stats->tx_carrier_errors++; + } + if (unlikely(p->des01.etx.late_collision)) { + DBG(KERN_ERR "\tlate_collision error\n"); + stats->collisions += p->des01.etx.collision_count; + } + if (unlikely(p->des01.etx.excessive_collisions)) { + DBG(KERN_ERR "\texcessive_collisions\n"); + stats->collisions += p->des01.etx.collision_count; + } + if (unlikely(p->des01.etx.excessive_deferral)) { + DBG(KERN_INFO "\texcessive tx_deferral\n"); + x->tx_deferred++; + } + + if (unlikely(p->des01.etx.underflow_error)) { + DBG(KERN_ERR "\tunderflow error\n"); + gmac_flush_tx_fifo(ioaddr); + x->tx_underflow++; + } + + if (unlikely(p->des01.etx.ip_header_error)) { + DBG(KERN_ERR "\tTX IP header csum error\n"); + x->tx_ip_header_error++; + } + + if (unlikely(p->des01.etx.payload_error)) { + DBG(KERN_ERR "\tAddr/Payload csum error\n"); + x->tx_payload_error++; + gmac_flush_tx_fifo(ioaddr); + } + + ret = -1; + } + + if (unlikely(p->des01.etx.deferred)) { + DBG(KERN_INFO "GMAC TX status: tx deferred\n"); + x->tx_deferred++; + } +#ifdef STMMAC_VLAN_TAG_USED + if (p->des01.etx.vlan_frame) { + DBG(KERN_INFO "GMAC TX status: VLAN frame\n"); + x->tx_vlan++; + } +#endif + + return ret; +} + +static int gmac_get_tx_len(struct dma_desc *p) +{ + return p->des01.etx.buffer1_size; +} + +static int gmac_coe_rdes0(int ipc_err, int type, int payload_err) +{ + int ret = good_frame; + u32 status = (type << 2 | ipc_err << 1 | payload_err) & 0x7; + + /* bits 5 7 0 | Frame status + * ---------------------------------------------------------- + * 0 0 0 | IEEE 802.3 Type frame (lenght < 1536 octects) + * 1 0 0 | IPv4/6 No CSUM errorS. + * 1 0 1 | IPv4/6 CSUM PAYLOAD error + * 1 1 0 | IPv4/6 CSUM IP HR error + * 1 1 1 | IPv4/6 IP PAYLOAD AND HEADER errorS + * 0 0 1 | IPv4/6 unsupported IP PAYLOAD + * 0 1 1 | COE bypassed.. no IPv4/6 frame + * 0 1 0 | Reserved. + */ + if (status == 0x0) { + DBG(KERN_INFO "RX Des0 status: IEEE 802.3 Type frame.\n"); + ret = good_frame; + } else if (status == 0x4) { + DBG(KERN_INFO "RX Des0 status: IPv4/6 No CSUM errorS.\n"); + ret = good_frame; + } else if (status == 0x5) { + DBG(KERN_ERR "RX Des0 status: IPv4/6 Payload Error.\n"); + ret = csum_none; + } else if (status == 0x6) { + DBG(KERN_ERR "RX Des0 status: IPv4/6 Header Error.\n"); + ret = csum_none; + } else if (status == 0x7) { + DBG(KERN_ERR + "RX Des0 status: IPv4/6 Header and Payload Error.\n"); + ret = csum_none; + } else if (status == 0x1) { + DBG(KERN_ERR + "RX Des0 status: IPv4/6 unsupported IP PAYLOAD.\n"); + ret = discard_frame; + } else if (status == 0x3) { + DBG(KERN_ERR "RX Des0 status: No IPv4, IPv6 frame.\n"); + ret = discard_frame; + } + return ret; +} + +static int gmac_get_rx_frame_status(void *data, struct stmmac_extra_stats *x, + struct dma_desc *p) +{ + int ret = good_frame; + struct net_device_stats *stats = (struct net_device_stats *)data; + + if (unlikely(p->des01.erx.error_summary)) { + DBG(KERN_ERR "GMAC RX Error Summary... 
0x%08x\n", p->des01.erx); + if (unlikely(p->des01.erx.descriptor_error)) { + DBG(KERN_ERR "\tdescriptor error\n"); + x->rx_desc++; + stats->rx_length_errors++; + } + if (unlikely(p->des01.erx.overflow_error)) { + DBG(KERN_ERR "\toverflow error\n"); + x->rx_gmac_overflow++; + } + + if (unlikely(p->des01.erx.ipc_csum_error)) + DBG(KERN_ERR "\tIPC Csum Error/Giant frame\n"); + + if (unlikely(p->des01.erx.late_collision)) { + DBG(KERN_ERR "\tlate_collision error\n"); + stats->collisions++; + stats->collisions++; + } + if (unlikely(p->des01.erx.receive_watchdog)) { + DBG(KERN_ERR "\treceive_watchdog error\n"); + x->rx_watchdog++; + } + if (unlikely(p->des01.erx.error_gmii)) { + DBG(KERN_ERR "\tReceive Error\n"); + x->rx_mii++; + } + if (unlikely(p->des01.erx.crc_error)) { + DBG(KERN_ERR "\tCRC error\n"); + x->rx_crc++; + stats->rx_crc_errors++; + } + ret = discard_frame; + } + + /* After a payload csum error, the ES bit is set. + * It doesn't match with the information reported into the databook. + * At any rate, we need to understand if the CSUM hw computation is ok + * and report this info to the upper layers. */ + ret = gmac_coe_rdes0(p->des01.erx.ipc_csum_error, + p->des01.erx.frame_type, p->des01.erx.payload_csum_error); + + if (unlikely(p->des01.erx.dribbling)) { + DBG(KERN_ERR "GMAC RX: dribbling error\n"); + ret = discard_frame; + } + if (unlikely(p->des01.erx.sa_filter_fail)) { + DBG(KERN_ERR "GMAC RX : Source Address filter fail\n"); + x->sa_rx_filter_fail++; + ret = discard_frame; + } + if (unlikely(p->des01.erx.da_filter_fail)) { + DBG(KERN_ERR "GMAC RX : Destination Address filter fail\n"); + x->da_rx_filter_fail++; + ret = discard_frame; + } + if (unlikely(p->des01.erx.length_error)) { + DBG(KERN_ERR "GMAC RX: length_error error\n"); + x->rx_lenght++; + ret = discard_frame; + } +#ifdef STMMAC_VLAN_TAG_USED + if (p->des01.erx.vlan_tag) { + DBG(KERN_INFO "GMAC RX: VLAN frame tagged\n"); + x->rx_vlan++; + } +#endif + return ret; +} + +static void gmac_irq_status(unsigned long ioaddr) +{ + u32 intr_status = readl(ioaddr + GMAC_INT_STATUS); + + /* Not used events (e.g. MMC interrupts) are not handled. */ + if ((intr_status & mmc_tx_irq)) + DBG(KERN_DEBUG "GMAC: MMC tx interrupt: 0x%08x\n", + readl(ioaddr + GMAC_MMC_TX_INTR)); + if (unlikely(intr_status & mmc_rx_irq)) + DBG(KERN_DEBUG "GMAC: MMC rx interrupt: 0x%08x\n", + readl(ioaddr + GMAC_MMC_RX_INTR)); + if (unlikely(intr_status & mmc_rx_csum_offload_irq)) + DBG(KERN_DEBUG "GMAC: MMC rx csum offload: 0x%08x\n", + readl(ioaddr + GMAC_MMC_RX_CSUM_OFFLOAD)); + if (unlikely(intr_status & pmt_irq)) { + DBG(KERN_DEBUG "GMAC: received Magic frame\n"); + /* clear the PMT bits 5 and 6 by reading the PMT + * status register. 
*/ + readl(ioaddr + GMAC_PMT); + } + + return; +} + +static void gmac_core_init(unsigned long ioaddr) +{ + u32 value = readl(ioaddr + GMAC_CONTROL); + value |= GMAC_CORE_INIT; + writel(value, ioaddr + GMAC_CONTROL); + + /* STBus Bridge Configuration */ + /*writel(0xc5608, ioaddr + 0x00007000);*/ + + /* Freeze MMC counters */ + writel(0x8, ioaddr + GMAC_MMC_CTRL); + /* Mask GMAC interrupts */ + writel(0x207, ioaddr + GMAC_INT_MASK); + +#ifdef STMMAC_VLAN_TAG_USED + /* Tag detection without filtering */ + writel(0x0, ioaddr + GMAC_VLAN_TAG); +#endif + return; +} + +static void gmac_set_umac_addr(unsigned long ioaddr, unsigned char *addr, + unsigned int reg_n) +{ + stmmac_set_mac_addr(ioaddr, addr, GMAC_ADDR_HIGH(reg_n), + GMAC_ADDR_LOW(reg_n)); +} + +static void gmac_get_umac_addr(unsigned long ioaddr, unsigned char *addr, + unsigned int reg_n) +{ + stmmac_get_mac_addr(ioaddr, addr, GMAC_ADDR_HIGH(reg_n), + GMAC_ADDR_LOW(reg_n)); +} + +static void gmac_set_filter(struct net_device *dev) +{ + unsigned long ioaddr = dev->base_addr; + unsigned int value = 0; + + DBG(KERN_INFO "%s: # mcasts %d, # unicast %d\n", + __func__, dev->mc_count, dev->uc_count); + + if (dev->flags & IFF_PROMISC) + value = GMAC_FRAME_FILTER_PR; + else if ((dev->mc_count > HASH_TABLE_SIZE) + || (dev->flags & IFF_ALLMULTI)) { + value = GMAC_FRAME_FILTER_PM; /* pass all multi */ + writel(0xffffffff, ioaddr + GMAC_HASH_HIGH); + writel(0xffffffff, ioaddr + GMAC_HASH_LOW); + } else if (dev->mc_count > 0) { + int i; + u32 mc_filter[2]; + struct dev_mc_list *mclist; + + /* Hash filter for multicast */ + value = GMAC_FRAME_FILTER_HMC; + + memset(mc_filter, 0, sizeof(mc_filter)); + for (i = 0, mclist = dev->mc_list; + mclist && i < dev->mc_count; i++, mclist = mclist->next) { + /* The upper 6 bits of the calculated CRC are used to + index the contents of the hash table */ + int bit_nr = + bitrev32(~crc32_le(~0, mclist->dmi_addr, 6)) >> 26; + /* The most significant bit determines the register to + * use (H/L) while the other 5 bits determine the bit + * within the register.
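+ * E.g. (illustrative value): bit_nr = 42 = 0b101010, so 42 >> 5 = 1 + * selects mc_filter[1] (written to GMAC_HASH_HIGH below) and + * 42 & 31 = 10 picks bit 10 inside it.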
*/ + mc_filter[bit_nr >> 5] |= 1 << (bit_nr & 31); + } + writel(mc_filter[0], ioaddr + GMAC_HASH_LOW); + writel(mc_filter[1], ioaddr + GMAC_HASH_HIGH); + } + + /* Handle multiple unicast addresses (perfect filtering)*/ + if (dev->uc_count > GMAC_MAX_UNICAST_ADDRESSES) + /* Switch to promiscuous mode if more than 16 addrs + are required */ + value |= GMAC_FRAME_FILTER_PR; + else { + int i; + struct dev_addr_list *uc_ptr = dev->uc_list; + + for (i = 0; i < dev->uc_count; i++) { + gmac_set_umac_addr(ioaddr, uc_ptr->da_addr, + i + 1); + + DBG(KERN_INFO "\t%d " + "- Unicast addr %02x:%02x:%02x:%02x:%02x:" + "%02x\n", i + 1, + uc_ptr->da_addr[0], uc_ptr->da_addr[1], + uc_ptr->da_addr[2], uc_ptr->da_addr[3], + uc_ptr->da_addr[4], uc_ptr->da_addr[5]); + uc_ptr = uc_ptr->next; + } + } + +#ifdef FRAME_FILTER_DEBUG + /* Enable Receive all mode (to debug filtering_fail errors) */ + value |= GMAC_FRAME_FILTER_RA; +#endif + writel(value, ioaddr + GMAC_FRAME_FILTER); + + DBG(KERN_INFO "\tFrame Filter reg: 0x%08x\n\tHash regs: " + "HI 0x%08x, LO 0x%08x\n", readl(ioaddr + GMAC_FRAME_FILTER), + readl(ioaddr + GMAC_HASH_HIGH), readl(ioaddr + GMAC_HASH_LOW)); + + return; +} + +static void gmac_flow_ctrl(unsigned long ioaddr, unsigned int duplex, + unsigned int fc, unsigned int pause_time) +{ + unsigned int flow = 0; + + DBG(KERN_DEBUG "GMAC Flow-Control:\n"); + if (fc & FLOW_RX) { + DBG(KERN_DEBUG "\tReceive Flow-Control ON\n"); + flow |= GMAC_FLOW_CTRL_RFE; + } + if (fc & FLOW_TX) { + DBG(KERN_DEBUG "\tTransmit Flow-Control ON\n"); + flow |= GMAC_FLOW_CTRL_TFE; + } + + if (duplex) { + DBG(KERN_DEBUG "\tduplex mode: pause time: %d\n", pause_time); + flow |= (pause_time << GMAC_FLOW_CTRL_PT_SHIFT); + } + + writel(flow, ioaddr + GMAC_FLOW_CTRL); + return; +} + +static void gmac_pmt(unsigned long ioaddr, unsigned long mode) +{ + unsigned int pmt = 0; + + if (mode == WAKE_MAGIC) { + DBG(KERN_DEBUG "GMAC: WOL Magic frame\n"); + pmt |= power_down | magic_pkt_en; + } else if (mode == WAKE_UCAST) { + DBG(KERN_DEBUG "GMAC: WOL on global unicast\n"); + pmt |= global_unicast; + } + + writel(pmt, ioaddr + GMAC_PMT); + return; +} + +static void gmac_init_rx_desc(struct dma_desc *p, unsigned int ring_size, + int disable_rx_ic) +{ + int i; + for (i = 0; i < ring_size; i++) { + p->des01.erx.own = 1; + p->des01.erx.buffer1_size = BUF_SIZE_8KiB - 1; + /* To support jumbo frames */ + p->des01.erx.buffer2_size = BUF_SIZE_8KiB - 1; + if (i == ring_size - 1) + p->des01.erx.end_ring = 1; + if (disable_rx_ic) + p->des01.erx.disable_ic = 1; + p++; + } + return; +} + +static void gmac_init_tx_desc(struct dma_desc *p, unsigned int ring_size) +{ + int i; + + for (i = 0; i < ring_size; i++) { + p->des01.etx.own = 0; + if (i == ring_size - 1) + p->des01.etx.end_ring = 1; + p++; + } + + return; +} + +static int gmac_get_tx_owner(struct dma_desc *p) +{ + return p->des01.etx.own; +} + +static int gmac_get_rx_owner(struct dma_desc *p) +{ + return p->des01.erx.own; +} + +static void gmac_set_tx_owner(struct dma_desc *p) +{ + p->des01.etx.own = 1; +} + +static void gmac_set_rx_owner(struct dma_desc *p) +{ + p->des01.erx.own = 1; +} + +static int gmac_get_tx_ls(struct dma_desc *p) +{ + return p->des01.etx.last_segment; +} + +static void gmac_release_tx_desc(struct dma_desc *p) +{ + int ter = p->des01.etx.end_ring; + + memset(p, 0, sizeof(struct dma_desc)); + p->des01.etx.end_ring = ter; + + return; +} + +static void gmac_prepare_tx_desc(struct dma_desc *p, int is_fs, int len, + int csum_flag) +{ + p->des01.etx.first_segment = is_fs; + if
(unlikely(len > BUF_SIZE_4KiB)) { + p->des01.etx.buffer1_size = BUF_SIZE_4KiB; + p->des01.etx.buffer2_size = len - BUF_SIZE_4KiB; + } else { + p->des01.etx.buffer1_size = len; + } + if (likely(csum_flag)) + p->des01.etx.checksum_insertion = cic_full; +} + +static void gmac_clear_tx_ic(struct dma_desc *p) +{ + p->des01.etx.interrupt = 0; +} + +static void gmac_close_tx_desc(struct dma_desc *p) +{ + p->des01.etx.last_segment = 1; + p->des01.etx.interrupt = 1; +} + +static int gmac_get_rx_frame_len(struct dma_desc *p) +{ + return p->des01.erx.frame_length; +} + +struct stmmac_ops gmac_driver = { + .core_init = gmac_core_init, + .dump_mac_regs = gmac_dump_regs, + .dma_init = gmac_dma_init, + .dump_dma_regs = gmac_dump_dma_regs, + .dma_mode = gmac_dma_operation_mode, + .dma_diagnostic_fr = gmac_dma_diagnostic_fr, + .tx_status = gmac_get_tx_frame_status, + .rx_status = gmac_get_rx_frame_status, + .get_tx_len = gmac_get_tx_len, + .set_filter = gmac_set_filter, + .flow_ctrl = gmac_flow_ctrl, + .pmt = gmac_pmt, + .init_rx_desc = gmac_init_rx_desc, + .init_tx_desc = gmac_init_tx_desc, + .get_tx_owner = gmac_get_tx_owner, + .get_rx_owner = gmac_get_rx_owner, + .release_tx_desc = gmac_release_tx_desc, + .prepare_tx_desc = gmac_prepare_tx_desc, + .clear_tx_ic = gmac_clear_tx_ic, + .close_tx_desc = gmac_close_tx_desc, + .get_tx_ls = gmac_get_tx_ls, + .set_tx_owner = gmac_set_tx_owner, + .set_rx_owner = gmac_set_rx_owner, + .get_rx_frame_len = gmac_get_rx_frame_len, + .host_irq_status = gmac_irq_status, + .set_umac_addr = gmac_set_umac_addr, + .get_umac_addr = gmac_get_umac_addr, +}; + +struct mac_device_info *gmac_setup(unsigned long ioaddr) +{ + struct mac_device_info *mac; + u32 uid = readl(ioaddr + GMAC_VERSION); + + pr_info("\tGMAC - user ID: 0x%x, Synopsys ID: 0x%x\n", + ((uid & 0x0000ff00) >> 8), (uid & 0x000000ff)); + + mac = kzalloc(sizeof(const struct mac_device_info), GFP_KERNEL); + + mac->ops = &gmac_driver; + mac->hw.pmt = PMT_SUPPORTED; + mac->hw.link.port = GMAC_CONTROL_PS; + mac->hw.link.duplex = GMAC_CONTROL_DM; + mac->hw.link.speed = GMAC_CONTROL_FES; + mac->hw.mii.addr = GMAC_MII_ADDR; + mac->hw.mii.data = GMAC_MII_DATA; + + return mac; +} diff --git a/drivers/net/stmmac/gmac.h b/drivers/net/stmmac/gmac.h new file mode 100644 index 00000000000..684a363120a --- /dev/null +++ b/drivers/net/stmmac/gmac.h @@ -0,0 +1,204 @@ +/******************************************************************************* + Copyright (C) 2007-2009 STMicroelectronics Ltd + + This program is free software; you can redistribute it and/or modify it + under the terms and conditions of the GNU General Public License, + version 2, as published by the Free Software Foundation. + + This program is distributed in the hope it will be useful, but WITHOUT + ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + more details. + + You should have received a copy of the GNU General Public License along with + this program; if not, write to the Free Software Foundation, Inc., + 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + + The full GNU General Public License is included in this distribution in + the file called "COPYING". 
+ + Author: Giuseppe Cavallaro <peppe.cavallaro@st.com> +*******************************************************************************/ + +#define GMAC_CONTROL 0x00000000 /* Configuration */ +#define GMAC_FRAME_FILTER 0x00000004 /* Frame Filter */ +#define GMAC_HASH_HIGH 0x00000008 /* Multicast Hash Table High */ +#define GMAC_HASH_LOW 0x0000000c /* Multicast Hash Table Low */ +#define GMAC_MII_ADDR 0x00000010 /* MII Address */ +#define GMAC_MII_DATA 0x00000014 /* MII Data */ +#define GMAC_FLOW_CTRL 0x00000018 /* Flow Control */ +#define GMAC_VLAN_TAG 0x0000001c /* VLAN Tag */ +#define GMAC_VERSION 0x00000020 /* GMAC CORE Version */ +#define GMAC_WAKEUP_FILTER 0x00000028 /* Wake-up Frame Filter */ + +#define GMAC_INT_STATUS 0x00000038 /* interrupt status register */ +enum gmac_irq_status { + time_stamp_irq = 0x0200, + mmc_rx_csum_offload_irq = 0x0080, + mmc_tx_irq = 0x0040, + mmc_rx_irq = 0x0020, + mmc_irq = 0x0010, + pmt_irq = 0x0008, + pcs_ane_irq = 0x0004, + pcs_link_irq = 0x0002, + rgmii_irq = 0x0001, +}; +#define GMAC_INT_MASK 0x0000003c /* interrupt mask register */ + +/* PMT Control and Status */ +#define GMAC_PMT 0x0000002c +enum power_event { + pointer_reset = 0x80000000, + global_unicast = 0x00000200, + wake_up_rx_frame = 0x00000040, + magic_frame = 0x00000020, + wake_up_frame_en = 0x00000004, + magic_pkt_en = 0x00000002, + power_down = 0x00000001, +}; + +/* GMAC HW ADDR regs */ +#define GMAC_ADDR_HIGH(reg) (0x00000040+(reg * 8)) +#define GMAC_ADDR_LOW(reg) (0x00000044+(reg * 8)) +#define GMAC_MAX_UNICAST_ADDRESSES 16 + +#define GMAC_AN_CTRL 0x000000c0 /* AN control */ +#define GMAC_AN_STATUS 0x000000c4 /* AN status */ +#define GMAC_ANE_ADV 0x000000c8 /* Auto-Neg. Advertisement */ +#define GMAC_ANE_LINK 0x000000cc /* Auto-Neg. link partner ability */ +#define GMAC_ANE_EXP 0x000000d0 /* ANE expansion */ +#define GMAC_TBI 0x000000d4 /* TBI extend status */ +#define GMAC_GMII_STATUS 0x000000d8 /* S/R-GMII status */ + +/* GMAC Configuration defines */ +#define GMAC_CONTROL_TC 0x01000000 /* Transmit Conf.
in RGMII/SGMII */ +#define GMAC_CONTROL_WD 0x00800000 /* Disable Watchdog on receive */ +#define GMAC_CONTROL_JD 0x00400000 /* Jabber disable */ +#define GMAC_CONTROL_BE 0x00200000 /* Frame Burst Enable */ +#define GMAC_CONTROL_JE 0x00100000 /* Jumbo frame */ +enum inter_frame_gap { + GMAC_CONTROL_IFG_88 = 0x00040000, + GMAC_CONTROL_IFG_80 = 0x00020000, + GMAC_CONTROL_IFG_40 = 0x000e0000, +}; +#define GMAC_CONTROL_DCRS 0x00010000 /* Disable carrier sense during tx */ +#define GMAC_CONTROL_PS 0x00008000 /* Port Select 0:GMI 1:MII */ +#define GMAC_CONTROL_FES 0x00004000 /* Speed 0:10 1:100 */ +#define GMAC_CONTROL_DO 0x00002000 /* Disable Rx Own */ +#define GMAC_CONTROL_LM 0x00001000 /* Loop-back mode */ +#define GMAC_CONTROL_DM 0x00000800 /* Duplex Mode */ +#define GMAC_CONTROL_IPC 0x00000400 /* Checksum Offload */ +#define GMAC_CONTROL_DR 0x00000200 /* Disable Retry */ +#define GMAC_CONTROL_LUD 0x00000100 /* Link up/down */ +#define GMAC_CONTROL_ACS 0x00000080 /* Automatic Pad Stripping */ +#define GMAC_CONTROL_DC 0x00000010 /* Deferral Check */ +#define GMAC_CONTROL_TE 0x00000008 /* Transmitter Enable */ +#define GMAC_CONTROL_RE 0x00000004 /* Receiver Enable */ + +#define GMAC_CORE_INIT (GMAC_CONTROL_JD | GMAC_CONTROL_PS | GMAC_CONTROL_ACS | \ + GMAC_CONTROL_IPC | GMAC_CONTROL_JE | GMAC_CONTROL_BE) + +/* GMAC Frame Filter defines */ +#define GMAC_FRAME_FILTER_PR 0x00000001 /* Promiscuous Mode */ +#define GMAC_FRAME_FILTER_HUC 0x00000002 /* Hash Unicast */ +#define GMAC_FRAME_FILTER_HMC 0x00000004 /* Hash Multicast */ +#define GMAC_FRAME_FILTER_DAIF 0x00000008 /* DA Inverse Filtering */ +#define GMAC_FRAME_FILTER_PM 0x00000010 /* Pass all multicast */ +#define GMAC_FRAME_FILTER_DBF 0x00000020 /* Disable Broadcast frames */ +#define GMAC_FRAME_FILTER_SAIF 0x00000100 /* Inverse Filtering */ +#define GMAC_FRAME_FILTER_SAF 0x00000200 /* Source Address Filter */ +#define GMAC_FRAME_FILTER_HPF 0x00000400 /* Hash or perfect Filter */ +#define GMAC_FRAME_FILTER_RA 0x80000000 /* Receive all mode */ +/* GMII ADDR defines */ +#define GMAC_MII_ADDR_WRITE 0x00000002 /* MII Write */ +#define GMAC_MII_ADDR_BUSY 0x00000001 /* MII Busy */ +/* GMAC FLOW CTRL defines */ +#define GMAC_FLOW_CTRL_PT_MASK 0xffff0000 /* Pause Time Mask */ +#define GMAC_FLOW_CTRL_PT_SHIFT 16 +#define GMAC_FLOW_CTRL_RFE 0x00000004 /* Rx Flow Control Enable */ +#define GMAC_FLOW_CTRL_TFE 0x00000002 /* Tx Flow Control Enable */ +#define GMAC_FLOW_CTRL_FCB_BPA 0x00000001 /* Flow Control Busy ... 
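/ Backpressure Activate (presumably: triggers a Pause frame in full duplex and back-pressure in half duplex)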
*/ + +/*--- DMA BLOCK defines ---*/ +/* DMA Bus Mode register defines */ +#define DMA_BUS_MODE_SFT_RESET 0x00000001 /* Software Reset */ +#define DMA_BUS_MODE_DA 0x00000002 /* Arbitration scheme */ +#define DMA_BUS_MODE_DSL_MASK 0x0000007c /* Descriptor Skip Length */ +#define DMA_BUS_MODE_DSL_SHIFT 2 /* (in DWORDS) */ +/* Programmable burst length (passed through platform)*/ +#define DMA_BUS_MODE_PBL_MASK 0x00003f00 /* Programmable Burst Len */ +#define DMA_BUS_MODE_PBL_SHIFT 8 + +enum rx_tx_priority_ratio { + double_ratio = 0x00004000, /*2:1 */ + triple_ratio = 0x00008000, /*3:1 */ + quadruple_ratio = 0x0000c000, /*4:1 */ +}; + +#define DMA_BUS_MODE_FB 0x00010000 /* Fixed burst */ +#define DMA_BUS_MODE_RPBL_MASK 0x003e0000 /* Rx-Programmable Burst Len */ +#define DMA_BUS_MODE_RPBL_SHIFT 17 +#define DMA_BUS_MODE_USP 0x00800000 +#define DMA_BUS_MODE_4PBL 0x01000000 +#define DMA_BUS_MODE_AAL 0x02000000 + +/* DMA CRS Control and Status Register Mapping */ +#define DMA_HOST_TX_DESC 0x00001048 /* Current Host Tx descriptor */ +#define DMA_HOST_RX_DESC 0x0000104c /* Current Host Rx descriptor */ +/* DMA Bus Mode register defines */ +#define DMA_BUS_PR_RATIO_MASK 0x0000c000 /* Rx/Tx priority ratio */ +#define DMA_BUS_PR_RATIO_SHIFT 14 +#define DMA_BUS_FB 0x00010000 /* Fixed Burst */ + +/* DMA operation mode defines (start/stop tx/rx are placed in common header)*/ +#define DMA_CONTROL_DT 0x04000000 /* Disable Drop TCP/IP csum error */ +#define DMA_CONTROL_RSF 0x02000000 /* Receive Store and Forward */ +#define DMA_CONTROL_DFF 0x01000000 /* Disable flushing */ +/* Threshold for Activating the FC */ +enum rfa { + act_full_minus_1 = 0x00800000, + act_full_minus_2 = 0x00800200, + act_full_minus_3 = 0x00800400, + act_full_minus_4 = 0x00800600, +}; +/* Threshold for Deactivating the FC */ +enum rfd { + deac_full_minus_1 = 0x00400000, + deac_full_minus_2 = 0x00400800, + deac_full_minus_3 = 0x00401000, + deac_full_minus_4 = 0x00401800, +}; +#define DMA_CONTROL_TSF 0x00200000 /* Transmit Store and Forward */ +#define DMA_CONTROL_FTF 0x00100000 /* Flush transmit FIFO */ + +enum ttc_control { + DMA_CONTROL_TTC_64 = 0x00000000, + DMA_CONTROL_TTC_128 = 0x00004000, + DMA_CONTROL_TTC_192 = 0x00008000, + DMA_CONTROL_TTC_256 = 0x0000c000, + DMA_CONTROL_TTC_40 = 0x00010000, + DMA_CONTROL_TTC_32 = 0x00014000, + DMA_CONTROL_TTC_24 = 0x00018000, + DMA_CONTROL_TTC_16 = 0x0001c000, +}; +#define DMA_CONTROL_TC_TX_MASK 0xfffe3fff + +#define DMA_CONTROL_EFC 0x00000100 +#define DMA_CONTROL_FEF 0x00000080 +#define DMA_CONTROL_FUF 0x00000040 + +enum rtc_control { + DMA_CONTROL_RTC_64 = 0x00000000, + DMA_CONTROL_RTC_32 = 0x00000008, + DMA_CONTROL_RTC_96 = 0x00000010, + DMA_CONTROL_RTC_128 = 0x00000018, +}; +#define DMA_CONTROL_TC_RX_MASK 0xffffffe7 + +#define DMA_CONTROL_OSF 0x00000004 /* Operate on second frame */ + +/* MMC registers offset */ +#define GMAC_MMC_CTRL 0x100 +#define GMAC_MMC_RX_INTR 0x104 +#define GMAC_MMC_TX_INTR 0x108 +#define GMAC_MMC_RX_CSUM_OFFLOAD 0x208 diff --git a/drivers/net/stmmac/mac100.c b/drivers/net/stmmac/mac100.c new file mode 100644 index 00000000000..625171b6062 --- /dev/null +++ b/drivers/net/stmmac/mac100.c @@ -0,0 +1,517 @@ +/******************************************************************************* + This is the driver for the MAC 10/100 on-chip Ethernet controller + currently tested on all the ST boards based on STb7109 and stx7200 SoCs. + + DWC Ether MAC 10/100 Universal version 4.0 has been used for developing + this code.
+ + Copyright (C) 2007-2009 STMicroelectronics Ltd + + This program is free software; you can redistribute it and/or modify it + under the terms and conditions of the GNU General Public License, + version 2, as published by the Free Software Foundation. + + This program is distributed in the hope it will be useful, but WITHOUT + ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + more details. + + You should have received a copy of the GNU General Public License along with + this program; if not, write to the Free Software Foundation, Inc., + 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + + The full GNU General Public License is included in this distribution in + the file called "COPYING". + + Author: Giuseppe Cavallaro <peppe.cavallaro@st.com> +*******************************************************************************/ + +#include <linux/netdevice.h> +#include <linux/crc32.h> +#include <linux/mii.h> +#include <linux/phy.h> + +#include "common.h" +#include "mac100.h" + +#undef MAC100_DEBUG +/*#define MAC100_DEBUG*/ +#ifdef MAC100_DEBUG +#define DBG(fmt, args...) printk(fmt, ## args) +#else +#define DBG(fmt, args...) do { } while (0) +#endif + +static void mac100_core_init(unsigned long ioaddr) +{ + u32 value = readl(ioaddr + MAC_CONTROL); + + writel((value | MAC_CORE_INIT), ioaddr + MAC_CONTROL); + +#ifdef STMMAC_VLAN_TAG_USED + writel(ETH_P_8021Q, ioaddr + MAC_VLAN1); +#endif + return; +} + +static void mac100_dump_mac_regs(unsigned long ioaddr) +{ + pr_info("\t----------------------------------------------\n" + "\t MAC100 CSR (base addr = 0x%8x)\n" + "\t----------------------------------------------\n", + (unsigned int)ioaddr); + pr_info("\tcontrol reg (offset 0x%x): 0x%08x\n", MAC_CONTROL, + readl(ioaddr + MAC_CONTROL)); + pr_info("\taddr HI (offset 0x%x): 0x%08x\n", MAC_ADDR_HIGH, + readl(ioaddr + MAC_ADDR_HIGH)); + pr_info("\taddr LO (offset 0x%x): 0x%08x\n", MAC_ADDR_LOW, + readl(ioaddr + MAC_ADDR_LOW)); + pr_info("\tmulticast hash HI (offset 0x%x): 0x%08x\n", + MAC_HASH_HIGH, readl(ioaddr + MAC_HASH_HIGH)); + pr_info("\tmulticast hash LO (offset 0x%x): 0x%08x\n", + MAC_HASH_LOW, readl(ioaddr + MAC_HASH_LOW)); + pr_info("\tflow control (offset 0x%x): 0x%08x\n", + MAC_FLOW_CTRL, readl(ioaddr + MAC_FLOW_CTRL)); + pr_info("\tVLAN1 tag (offset 0x%x): 0x%08x\n", MAC_VLAN1, + readl(ioaddr + MAC_VLAN1)); + pr_info("\tVLAN2 tag (offset 0x%x): 0x%08x\n", MAC_VLAN2, + readl(ioaddr + MAC_VLAN2)); + pr_info("\n\tMAC management counter registers\n"); + pr_info("\t MMC ctrl (offset 0x%x): 0x%08x\n", + MMC_CONTROL, readl(ioaddr + MMC_CONTROL)); + pr_info("\t MMC High Interrupt (offset 0x%x): 0x%08x\n", + MMC_HIGH_INTR, readl(ioaddr + MMC_HIGH_INTR)); + pr_info("\t MMC Low Interrupt (offset 0x%x): 0x%08x\n", + MMC_LOW_INTR, readl(ioaddr + MMC_LOW_INTR)); + pr_info("\t MMC High Interrupt Mask (offset 0x%x): 0x%08x\n", + MMC_HIGH_INTR_MASK, readl(ioaddr + MMC_HIGH_INTR_MASK)); + pr_info("\t MMC Low Interrupt Mask (offset 0x%x): 0x%08x\n", + MMC_LOW_INTR_MASK, readl(ioaddr + MMC_LOW_INTR_MASK)); + return; +} + +static int mac100_dma_init(unsigned long ioaddr, int pbl, u32 dma_tx, + u32 dma_rx) +{ + u32 value = readl(ioaddr + DMA_BUS_MODE); + /* DMA SW reset */ + value |= DMA_BUS_MODE_SFT_RESET; + writel(value, ioaddr + DMA_BUS_MODE); + do {} while ((readl(ioaddr + DMA_BUS_MODE) & DMA_BUS_MODE_SFT_RESET)); + + /* Enable Application Access by writing to DMA CSR0 */
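+ /* E.g. with pbl = 8 (an illustrative platform value, not fixed by + * this driver) the word composed below is 0x0 | (8 << 8) = 0x00000800 */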
writel(DMA_BUS_MODE_DEFAULT | (pbl << DMA_BUS_MODE_PBL_SHIFT), + ioaddr + DMA_BUS_MODE); + + /* Mask interrupts by writing to CSR7 */ + writel(DMA_INTR_DEFAULT_MASK, ioaddr + DMA_INTR_ENA); + + /* The base address of the RX/TX descriptor lists must be written into + * DMA CSR3 and CSR4, respectively. */ + writel(dma_tx, ioaddr + DMA_TX_BASE_ADDR); + writel(dma_rx, ioaddr + DMA_RCV_BASE_ADDR); + + return 0; +} + +/* Store and Forward capability is not used at all. + * The transmit threshold can be programmed by + * setting the TTC bits in the DMA control register.*/ +static void mac100_dma_operation_mode(unsigned long ioaddr, int txmode, + int rxmode) +{ + u32 csr6 = readl(ioaddr + DMA_CONTROL); + + if (txmode <= 32) + csr6 |= DMA_CONTROL_TTC_32; + else if (txmode <= 64) + csr6 |= DMA_CONTROL_TTC_64; + else + csr6 |= DMA_CONTROL_TTC_128; + + writel(csr6, ioaddr + DMA_CONTROL); + + return; +} + +static void mac100_dump_dma_regs(unsigned long ioaddr) +{ + int i; + + DBG(KERN_DEBUG "MAC100 DMA CSR\n"); + for (i = 0; i < 9; i++) + pr_debug("\t CSR%d (offset 0x%x): 0x%08x\n", i, + (DMA_BUS_MODE + i * 4), + readl(ioaddr + DMA_BUS_MODE + i * 4)); + DBG(KERN_DEBUG "\t CSR20 (offset 0x%x): 0x%08x\n", + DMA_CUR_TX_BUF_ADDR, readl(ioaddr + DMA_CUR_TX_BUF_ADDR)); + DBG(KERN_DEBUG "\t CSR21 (offset 0x%x): 0x%08x\n", + DMA_CUR_RX_BUF_ADDR, readl(ioaddr + DMA_CUR_RX_BUF_ADDR)); + return; +} + +/* The DMA controller keeps two counters to track the number of + receive missed frames. */ +static void mac100_dma_diagnostic_fr(void *data, struct stmmac_extra_stats *x, + unsigned long ioaddr) +{ + struct net_device_stats *stats = (struct net_device_stats *)data; + u32 csr8 = readl(ioaddr + DMA_MISSED_FRAME_CTR); + + if (unlikely(csr8)) { + if (csr8 & DMA_MISSED_FRAME_OVE) { + stats->rx_over_errors += 0x800; + x->rx_overflow_cntr += 0x800; + } else { + unsigned int ove_cntr; + ove_cntr = ((csr8 & DMA_MISSED_FRAME_OVE_CNTR) >> 17); + stats->rx_over_errors += ove_cntr; + x->rx_overflow_cntr += ove_cntr; + } + + if (csr8 & DMA_MISSED_FRAME_OVE_M) { + stats->rx_missed_errors += 0xffff; + x->rx_missed_cntr += 0xffff; + } else { + unsigned int miss_f = (csr8 & DMA_MISSED_FRAME_M_CNTR); + stats->rx_missed_errors += miss_f; + x->rx_missed_cntr += miss_f; + } + } + return; +} + +static int mac100_get_tx_frame_status(void *data, struct stmmac_extra_stats *x, + struct dma_desc *p, unsigned long ioaddr) +{ + int ret = 0; + struct net_device_stats *stats = (struct net_device_stats *)data; + + if (unlikely(p->des01.tx.error_summary)) { + if (unlikely(p->des01.tx.underflow_error)) { + x->tx_underflow++; + stats->tx_fifo_errors++; + } + if (unlikely(p->des01.tx.no_carrier)) { + x->tx_carrier++; + stats->tx_carrier_errors++; + } + if (unlikely(p->des01.tx.loss_carrier)) { + x->tx_losscarrier++; + stats->tx_carrier_errors++; + } + if (unlikely((p->des01.tx.excessive_deferral) || + (p->des01.tx.excessive_collisions) || + (p->des01.tx.late_collision))) + stats->collisions += p->des01.tx.collision_count; + ret = -1; + } + if (unlikely(p->des01.tx.heartbeat_fail)) { + x->tx_heartbeat++; + stats->tx_heartbeat_errors++; + ret = -1; + } + if (unlikely(p->des01.tx.deferred)) + x->tx_deferred++; + + return ret; +} + +static int mac100_get_tx_len(struct dma_desc *p) +{ + return p->des01.tx.buffer1_size; +} + +/* This function verifies if each incoming frame has some errors + * and, if required, updates the multicast statistics. + * In case of success, it returns csum_none because the device + * is not able to compute the csum in HW.
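+ * Returning csum_none leaves checksum verification to the network + * stack (the skb is presumably passed up as CHECKSUM_NONE).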
*/ +static int mac100_get_rx_frame_status(void *data, struct stmmac_extra_stats *x, + struct dma_desc *p) +{ + int ret = csum_none; + struct net_device_stats *stats = (struct net_device_stats *)data; + + if (unlikely(p->des01.rx.last_descriptor == 0)) { + pr_warning("mac100 Error: Oversized Ethernet " + "frame spanned multiple buffers\n"); + stats->rx_length_errors++; + return discard_frame; + } + + if (unlikely(p->des01.rx.error_summary)) { + if (unlikely(p->des01.rx.descriptor_error)) + x->rx_desc++; + if (unlikely(p->des01.rx.partial_frame_error)) + x->rx_partial++; + if (unlikely(p->des01.rx.run_frame)) + x->rx_runt++; + if (unlikely(p->des01.rx.frame_too_long)) + x->rx_toolong++; + if (unlikely(p->des01.rx.collision)) { + x->rx_collision++; + stats->collisions++; + } + if (unlikely(p->des01.rx.crc_error)) { + x->rx_crc++; + stats->rx_crc_errors++; + } + ret = discard_frame; + } + if (unlikely(p->des01.rx.dribbling)) + ret = discard_frame; + + if (unlikely(p->des01.rx.length_error)) { + x->rx_lenght++; + ret = discard_frame; + } + if (unlikely(p->des01.rx.mii_error)) { + x->rx_mii++; + ret = discard_frame; + } + if (p->des01.rx.multicast_frame) { + x->rx_multicast++; + stats->multicast++; + } + return ret; +} + +static void mac100_irq_status(unsigned long ioaddr) +{ + return; +} + +static void mac100_set_umac_addr(unsigned long ioaddr, unsigned char *addr, + unsigned int reg_n) +{ + stmmac_set_mac_addr(ioaddr, addr, MAC_ADDR_HIGH, MAC_ADDR_LOW); +} + +static void mac100_get_umac_addr(unsigned long ioaddr, unsigned char *addr, + unsigned int reg_n) +{ + stmmac_get_mac_addr(ioaddr, addr, MAC_ADDR_HIGH, MAC_ADDR_LOW); +} + +static void mac100_set_filter(struct net_device *dev) +{ + unsigned long ioaddr = dev->base_addr; + u32 value = readl(ioaddr + MAC_CONTROL); + + if (dev->flags & IFF_PROMISC) { + value |= MAC_CONTROL_PR; + value &= ~(MAC_CONTROL_PM | MAC_CONTROL_IF | MAC_CONTROL_HO | + MAC_CONTROL_HP); + } else if ((dev->mc_count > HASH_TABLE_SIZE) + || (dev->flags & IFF_ALLMULTI)) { + value |= MAC_CONTROL_PM; + value &= ~(MAC_CONTROL_PR | MAC_CONTROL_IF | MAC_CONTROL_HO); + writel(0xffffffff, ioaddr + MAC_HASH_HIGH); + writel(0xffffffff, ioaddr + MAC_HASH_LOW); + } else if (dev->mc_count == 0) { /* no multicast */ + value &= ~(MAC_CONTROL_PM | MAC_CONTROL_PR | MAC_CONTROL_IF | + MAC_CONTROL_HO | MAC_CONTROL_HP); + } else { + int i; + u32 mc_filter[2]; + struct dev_mc_list *mclist; + + /* Perfect filter mode for physical address and Hash + filter for multicast */ + value |= MAC_CONTROL_HP; + value &= ~(MAC_CONTROL_PM | MAC_CONTROL_PR | MAC_CONTROL_IF + | MAC_CONTROL_HO); + + memset(mc_filter, 0, sizeof(mc_filter)); + for (i = 0, mclist = dev->mc_list; + mclist && i < dev->mc_count; i++, mclist = mclist->next) { + /* The upper 6 bits of the calculated CRC are used to + * index the contents of the hash table */ + int bit_nr = + ether_crc(ETH_ALEN, mclist->dmi_addr) >> 26; + /* The most significant bit determines the register to + * use (H/L) while the other 5 bits determine the bit + * within the register.
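+ * As in the GMAC filter above (illustrative value): bit_nr = 42 + * selects mc_filter[1], which is written to MAC_HASH_HIGH below, + * bit 42 & 31 = 10.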
*/ + mc_filter[bit_nr >> 5] |= 1 << (bit_nr & 31); + } + writel(mc_filter[0], ioaddr + MAC_HASH_LOW); + writel(mc_filter[1], ioaddr + MAC_HASH_HIGH); + } + + writel(value, ioaddr + MAC_CONTROL); + + DBG(KERN_INFO "%s: CTRL reg: 0x%08x Hash regs: " + "HI 0x%08x, LO 0x%08x\n", + __func__, readl(ioaddr + MAC_CONTROL), + readl(ioaddr + MAC_HASH_HIGH), readl(ioaddr + MAC_HASH_LOW)); + return; +} + +static void mac100_flow_ctrl(unsigned long ioaddr, unsigned int duplex, + unsigned int fc, unsigned int pause_time) +{ + unsigned int flow = MAC_FLOW_CTRL_ENABLE; + + if (duplex) + flow |= (pause_time << MAC_FLOW_CTRL_PT_SHIFT); + writel(flow, ioaddr + MAC_FLOW_CTRL); + + return; +} + +/* No PMT module supported in our SoC for the Ethernet Controller. */ +static void mac100_pmt(unsigned long ioaddr, unsigned long mode) +{ + return; +} + +static void mac100_init_rx_desc(struct dma_desc *p, unsigned int ring_size, + int disable_rx_ic) +{ + int i; + for (i = 0; i < ring_size; i++) { + p->des01.rx.own = 1; + p->des01.rx.buffer1_size = BUF_SIZE_2KiB - 1; + if (i == ring_size - 1) + p->des01.rx.end_ring = 1; + if (disable_rx_ic) + p->des01.rx.disable_ic = 1; + p++; + } + return; +} + +static void mac100_init_tx_desc(struct dma_desc *p, unsigned int ring_size) +{ + int i; + for (i = 0; i < ring_size; i++) { + p->des01.tx.own = 0; + if (i == ring_size - 1) + p->des01.tx.end_ring = 1; + p++; + } + return; +} + +static int mac100_get_tx_owner(struct dma_desc *p) +{ + return p->des01.tx.own; +} + +static int mac100_get_rx_owner(struct dma_desc *p) +{ + return p->des01.rx.own; +} + +static void mac100_set_tx_owner(struct dma_desc *p) +{ + p->des01.tx.own = 1; +} + +static void mac100_set_rx_owner(struct dma_desc *p) +{ + p->des01.rx.own = 1; +} + +static int mac100_get_tx_ls(struct dma_desc *p) +{ + return p->des01.tx.last_segment; +} + +static void mac100_release_tx_desc(struct dma_desc *p) +{ + int ter = p->des01.tx.end_ring; + + /* clean field used within the xmit */ + p->des01.tx.first_segment = 0; + p->des01.tx.last_segment = 0; + p->des01.tx.buffer1_size = 0; + + /* clean status reported */ + p->des01.tx.error_summary = 0; + p->des01.tx.underflow_error = 0; + p->des01.tx.no_carrier = 0; + p->des01.tx.loss_carrier = 0; + p->des01.tx.excessive_deferral = 0; + p->des01.tx.excessive_collisions = 0; + p->des01.tx.late_collision = 0; + p->des01.tx.heartbeat_fail = 0; + p->des01.tx.deferred = 0; + + /* set termination field */ + p->des01.tx.end_ring = ter; + + return; +} + +static void mac100_prepare_tx_desc(struct dma_desc *p, int is_fs, int len, + int csum_flag) +{ + p->des01.tx.first_segment = is_fs; + p->des01.tx.buffer1_size = len; +} + +static void mac100_clear_tx_ic(struct dma_desc *p) +{ + p->des01.tx.interrupt = 0; +} + +static void mac100_close_tx_desc(struct dma_desc *p) +{ + p->des01.tx.last_segment = 1; + p->des01.tx.interrupt = 1; +} + +static int mac100_get_rx_frame_len(struct dma_desc *p) +{ + return p->des01.rx.frame_length; +} + +struct stmmac_ops mac100_driver = { + .core_init = mac100_core_init, + .dump_mac_regs = mac100_dump_mac_regs, + .dma_init = mac100_dma_init, + .dump_dma_regs = mac100_dump_dma_regs, + .dma_mode = mac100_dma_operation_mode, + .dma_diagnostic_fr = mac100_dma_diagnostic_fr, + .tx_status = mac100_get_tx_frame_status, + .rx_status = mac100_get_rx_frame_status, + .get_tx_len = mac100_get_tx_len, + .set_filter = mac100_set_filter, + .flow_ctrl = mac100_flow_ctrl, + .pmt = mac100_pmt, + .init_rx_desc = mac100_init_rx_desc, + .init_tx_desc = mac100_init_tx_desc, + .get_tx_owner 
= mac100_get_tx_owner, + .get_rx_owner = mac100_get_rx_owner, + .release_tx_desc = mac100_release_tx_desc, + .prepare_tx_desc = mac100_prepare_tx_desc, + .clear_tx_ic = mac100_clear_tx_ic, + .close_tx_desc = mac100_close_tx_desc, + .get_tx_ls = mac100_get_tx_ls, + .set_tx_owner = mac100_set_tx_owner, + .set_rx_owner = mac100_set_rx_owner, + .get_rx_frame_len = mac100_get_rx_frame_len, + .host_irq_status = mac100_irq_status, + .set_umac_addr = mac100_set_umac_addr, + .get_umac_addr = mac100_get_umac_addr, +}; + +struct mac_device_info *mac100_setup(unsigned long ioaddr) +{ + struct mac_device_info *mac; + + mac = kzalloc(sizeof(const struct mac_device_info), GFP_KERNEL); + + pr_info("\tMAC 10/100\n"); + + mac->ops = &mac100_driver; + mac->hw.pmt = PMT_NOT_SUPPORTED; + mac->hw.link.port = MAC_CONTROL_PS; + mac->hw.link.duplex = MAC_CONTROL_F; + mac->hw.link.speed = 0; + mac->hw.mii.addr = MAC_MII_ADDR; + mac->hw.mii.data = MAC_MII_DATA; + + return mac; +} diff --git a/drivers/net/stmmac/mac100.h b/drivers/net/stmmac/mac100.h new file mode 100644 index 00000000000..0f8f110d004 --- /dev/null +++ b/drivers/net/stmmac/mac100.h @@ -0,0 +1,116 @@ +/******************************************************************************* + MAC 10/100 Header File + + Copyright (C) 2007-2009 STMicroelectronics Ltd + + This program is free software; you can redistribute it and/or modify it + under the terms and conditions of the GNU General Public License, + version 2, as published by the Free Software Foundation. + + This program is distributed in the hope it will be useful, but WITHOUT + ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + more details. + + You should have received a copy of the GNU General Public License along with + this program; if not, write to the Free Software Foundation, Inc., + 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + + The full GNU General Public License is included in this distribution in + the file called "COPYING". + + Author: Giuseppe Cavallaro <peppe.cavallaro@st.com> +*******************************************************************************/ + +/*---------------------------------------------------------------------------- + * MAC BLOCK defines + *---------------------------------------------------------------------------*/ +/* MAC CSR offset */ +#define MAC_CONTROL 0x00000000 /* MAC Control */ +#define MAC_ADDR_HIGH 0x00000004 /* MAC Address High */ +#define MAC_ADDR_LOW 0x00000008 /* MAC Address Low */ +#define MAC_HASH_HIGH 0x0000000c /* Multicast Hash Table High */ +#define MAC_HASH_LOW 0x00000010 /* Multicast Hash Table Low */ +#define MAC_MII_ADDR 0x00000014 /* MII Address */ +#define MAC_MII_DATA 0x00000018 /* MII Data */ +#define MAC_FLOW_CTRL 0x0000001c /* Flow Control */ +#define MAC_VLAN1 0x00000020 /* VLAN1 Tag */ +#define MAC_VLAN2 0x00000024 /* VLAN2 Tag */ + +/* MAC CTRL defines */ +#define MAC_CONTROL_RA 0x80000000 /* Receive All Mode */ +#define MAC_CONTROL_BLE 0x40000000 /* Endian Mode */ +#define MAC_CONTROL_HBD 0x10000000 /* Heartbeat Disable */ +#define MAC_CONTROL_PS 0x08000000 /* Port Select */ +#define MAC_CONTROL_DRO 0x00800000 /* Disable Receive Own */ +#define MAC_CONTROL_EXT_LOOPBACK 0x00400000 /* Reserved (ext loopback?) 
*/ +#define MAC_CONTROL_OM 0x00200000 /* Loopback Operating Mode */ +#define MAC_CONTROL_F 0x00100000 /* Full Duplex Mode */ +#define MAC_CONTROL_PM 0x00080000 /* Pass All Multicast */ +#define MAC_CONTROL_PR 0x00040000 /* Promiscuous Mode */ +#define MAC_CONTROL_IF 0x00020000 /* Inverse Filtering */ +#define MAC_CONTROL_PB 0x00010000 /* Pass Bad Frames */ +#define MAC_CONTROL_HO 0x00008000 /* Hash Only Filtering Mode */ +#define MAC_CONTROL_HP 0x00002000 /* Hash/Perfect Filtering Mode */ +#define MAC_CONTROL_LCC 0x00001000 /* Late Collision Control */ +#define MAC_CONTROL_DBF 0x00000800 /* Disable Broadcast Frames */ +#define MAC_CONTROL_DRTY 0x00000400 /* Disable Retry */ +#define MAC_CONTROL_ASTP 0x00000100 /* Automatic Pad Stripping */ +#define MAC_CONTROL_BOLMT_10 0x00000000 /* Back Off Limit 10 */ +#define MAC_CONTROL_BOLMT_8 0x00000040 /* Back Off Limit 8 */ +#define MAC_CONTROL_BOLMT_4 0x00000080 /* Back Off Limit 4 */ +#define MAC_CONTROL_BOLMT_1 0x000000c0 /* Back Off Limit 1 */ +#define MAC_CONTROL_DC 0x00000020 /* Deferral Check */ +#define MAC_CONTROL_TE 0x00000008 /* Transmitter Enable */ +#define MAC_CONTROL_RE 0x00000004 /* Receiver Enable */ + +#define MAC_CORE_INIT (MAC_CONTROL_HBD | MAC_CONTROL_ASTP) + +/* MAC FLOW CTRL defines */ +#define MAC_FLOW_CTRL_PT_MASK 0xffff0000 /* Pause Time Mask */ +#define MAC_FLOW_CTRL_PT_SHIFT 16 +#define MAC_FLOW_CTRL_PASS 0x00000004 /* Pass Control Frames */ +#define MAC_FLOW_CTRL_ENABLE 0x00000002 /* Flow Control Enable */ +#define MAC_FLOW_CTRL_PAUSE 0x00000001 /* Flow Control Busy ... */ + +/* MII ADDR defines */ +#define MAC_MII_ADDR_WRITE 0x00000002 /* MII Write */ +#define MAC_MII_ADDR_BUSY 0x00000001 /* MII Busy */ + +/*---------------------------------------------------------------------------- + * DMA BLOCK defines + *---------------------------------------------------------------------------*/ + +/* DMA Bus Mode register defines */ +#define DMA_BUS_MODE_DBO 0x00100000 /* Descriptor Byte Ordering */ +#define DMA_BUS_MODE_BLE 0x00000080 /* Big Endian/Little Endian */ +#define DMA_BUS_MODE_PBL_MASK 0x00003f00 /* Programmable Burst Len */ +#define DMA_BUS_MODE_PBL_SHIFT 8 +#define DMA_BUS_MODE_DSL_MASK 0x0000007c /* Descriptor Skip Length */ +#define DMA_BUS_MODE_DSL_SHIFT 2 /* (in DWORDS) */ +#define DMA_BUS_MODE_BAR_BUS 0x00000002 /* Bar-Bus Arbitration */ +#define DMA_BUS_MODE_SFT_RESET 0x00000001 /* Software Reset */ +#define DMA_BUS_MODE_DEFAULT 0x00000000 + +/* DMA Control register defines */ +#define DMA_CONTROL_SF 0x00200000 /* Store And Forward */ + +/* Transmit Threshold Control */ +enum ttc_control { + DMA_CONTROL_TTC_DEFAULT = 0x00000000, /* Threshold is 32 DWORDS */ + DMA_CONTROL_TTC_64 = 0x00004000, /* Threshold is 64 DWORDS */ + DMA_CONTROL_TTC_128 = 0x00008000, /* Threshold is 128 DWORDS */ + DMA_CONTROL_TTC_256 = 0x0000c000, /* Threshold is 256 DWORDS */ + DMA_CONTROL_TTC_18 = 0x00400000, /* Threshold is 18 DWORDS */ + DMA_CONTROL_TTC_24 = 0x00404000, /* Threshold is 24 DWORDS */ + DMA_CONTROL_TTC_32 = 0x00408000, /* Threshold is 32 DWORDS */ + DMA_CONTROL_TTC_40 = 0x0040c000, /* Threshold is 40 DWORDS */ + DMA_CONTROL_SE = 0x00000008, /* Stop On Empty */ + DMA_CONTROL_OSF = 0x00000004, /* Operate On 2nd Frame */ +}; + +/* STMAC110 DMA Missed Frame Counter register defines */ +#define DMA_MISSED_FRAME_OVE 0x10000000 /* FIFO Overflow Overflow */ +#define DMA_MISSED_FRAME_OVE_CNTR 0x0ffe0000 /* Overflow Frame Counter */ +#define DMA_MISSED_FRAME_OVE_M 0x00010000 /* Missed Frame Overflow */ +#define 
DMA_MISSED_FRAME_M_CNTR 0x0000ffff /* Missed Frame Counter */ diff --git a/drivers/net/stmmac/stmmac.h b/drivers/net/stmmac/stmmac.h new file mode 100644 index 00000000000..6d2eae3040e --- /dev/null +++ b/drivers/net/stmmac/stmmac.h @@ -0,0 +1,98 @@ +/******************************************************************************* + Copyright (C) 2007-2009 STMicroelectronics Ltd + + This program is free software; you can redistribute it and/or modify it + under the terms and conditions of the GNU General Public License, + version 2, as published by the Free Software Foundation. + + This program is distributed in the hope it will be useful, but WITHOUT + ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + more details. + + You should have received a copy of the GNU General Public License along with + this program; if not, write to the Free Software Foundation, Inc., + 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + + The full GNU General Public License is included in this distribution in + the file called "COPYING". + + Author: Giuseppe Cavallaro <peppe.cavallaro@st.com> +*******************************************************************************/ + +#define DRV_MODULE_VERSION "Oct_09" + +#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE) +#define STMMAC_VLAN_TAG_USED +#include <linux/if_vlan.h> +#endif + +#include "common.h" +#ifdef CONFIG_STMMAC_TIMER +#include "stmmac_timer.h" +#endif + +struct stmmac_priv { + /* Frequently used values are kept adjacent for cache effect */ + struct dma_desc *dma_tx ____cacheline_aligned; + dma_addr_t dma_tx_phy; + struct sk_buff **tx_skbuff; + unsigned int cur_tx; + unsigned int dirty_tx; + unsigned int dma_tx_size; + int tx_coe; + int tx_coalesce; + + struct dma_desc *dma_rx; + unsigned int cur_rx; + unsigned int dirty_rx; + struct sk_buff **rx_skbuff; + dma_addr_t *rx_skbuff_dma; + struct sk_buff_head rx_recycle; + + struct net_device *dev; + int is_gmac; + dma_addr_t dma_rx_phy; + unsigned int dma_rx_size; + int rx_csum; + unsigned int dma_buf_sz; + struct device *device; + struct mac_device_info *mac_type; + + struct stmmac_extra_stats xstats; + struct napi_struct napi; + + phy_interface_t phy_interface; + int pbl; + int bus_id; + int phy_addr; + int phy_mask; + int (*phy_reset) (void *priv); + void (*fix_mac_speed) (void *priv, unsigned int speed); + void *bsp_priv; + + int phy_irq; + struct phy_device *phydev; + int oldlink; + int speed; + int oldduplex; + unsigned int flow_ctrl; + unsigned int pause; + struct mii_bus *mii; + + u32 msg_enable; + spinlock_t lock; + int wolopts; + int wolenabled; + int shutdown; +#ifdef CONFIG_STMMAC_TIMER + struct stmmac_timer *tm; +#endif +#ifdef STMMAC_VLAN_TAG_USED + struct vlan_group *vlgrp; +#endif +}; + +extern int stmmac_mdio_unregister(struct net_device *ndev); +extern int stmmac_mdio_register(struct net_device *ndev); +extern void stmmac_set_ethtool_ops(struct net_device *netdev); diff --git a/drivers/net/stmmac/stmmac_ethtool.c b/drivers/net/stmmac/stmmac_ethtool.c new file mode 100644 index 00000000000..694ebe6a075 --- /dev/null +++ b/drivers/net/stmmac/stmmac_ethtool.c @@ -0,0 +1,395 @@ +/******************************************************************************* + STMMAC Ethtool support + + Copyright (C) 2007-2009 STMicroelectronics Ltd + + This program is free software; you can redistribute it and/or modify it + under the terms and conditions of the GNU General Public
License, + version 2, as published by the Free Software Foundation. + + This program is distributed in the hope it will be useful, but WITHOUT + ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + more details. + + You should have received a copy of the GNU General Public License along with + this program; if not, write to the Free Software Foundation, Inc., + 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + + The full GNU General Public License is included in this distribution in + the file called "COPYING". + + Author: Giuseppe Cavallaro <peppe.cavallaro@st.com> +*******************************************************************************/ + +#include <linux/etherdevice.h> +#include <linux/ethtool.h> +#include <linux/mii.h> +#include <linux/phy.h> + +#include "stmmac.h" + +#define REG_SPACE_SIZE 0x1054 +#define MAC100_ETHTOOL_NAME "st_mac100" +#define GMAC_ETHTOOL_NAME "st_gmac" + +struct stmmac_stats { + char stat_string[ETH_GSTRING_LEN]; + int sizeof_stat; + int stat_offset; +}; + +#define STMMAC_STAT(m) \ + { #m, FIELD_SIZEOF(struct stmmac_extra_stats, m), \ + offsetof(struct stmmac_priv, xstats.m)} + +static const struct stmmac_stats stmmac_gstrings_stats[] = { + STMMAC_STAT(tx_underflow), + STMMAC_STAT(tx_carrier), + STMMAC_STAT(tx_losscarrier), + STMMAC_STAT(tx_heartbeat), + STMMAC_STAT(tx_deferred), + STMMAC_STAT(tx_vlan), + STMMAC_STAT(rx_vlan), + STMMAC_STAT(tx_jabber), + STMMAC_STAT(tx_frame_flushed), + STMMAC_STAT(tx_payload_error), + STMMAC_STAT(tx_ip_header_error), + STMMAC_STAT(rx_desc), + STMMAC_STAT(rx_partial), + STMMAC_STAT(rx_runt), + STMMAC_STAT(rx_toolong), + STMMAC_STAT(rx_collision), + STMMAC_STAT(rx_crc), + STMMAC_STAT(rx_lenght), + STMMAC_STAT(rx_mii), + STMMAC_STAT(rx_multicast), + STMMAC_STAT(rx_gmac_overflow), + STMMAC_STAT(rx_watchdog), + STMMAC_STAT(da_rx_filter_fail), + STMMAC_STAT(sa_rx_filter_fail), + STMMAC_STAT(rx_missed_cntr), + STMMAC_STAT(rx_overflow_cntr), + STMMAC_STAT(tx_undeflow_irq), + STMMAC_STAT(tx_process_stopped_irq), + STMMAC_STAT(tx_jabber_irq), + STMMAC_STAT(rx_overflow_irq), + STMMAC_STAT(rx_buf_unav_irq), + STMMAC_STAT(rx_process_stopped_irq), + STMMAC_STAT(rx_watchdog_irq), + STMMAC_STAT(tx_early_irq), + STMMAC_STAT(fatal_bus_error_irq), + STMMAC_STAT(threshold), + STMMAC_STAT(tx_pkt_n), + STMMAC_STAT(rx_pkt_n), + STMMAC_STAT(poll_n), + STMMAC_STAT(sched_timer_n), + STMMAC_STAT(normal_irq_n), +}; +#define STMMAC_STATS_LEN ARRAY_SIZE(stmmac_gstrings_stats) + +void stmmac_ethtool_getdrvinfo(struct net_device *dev, + struct ethtool_drvinfo *info) +{ + struct stmmac_priv *priv = netdev_priv(dev); + + if (!priv->is_gmac) + strcpy(info->driver, MAC100_ETHTOOL_NAME); + else + strcpy(info->driver, GMAC_ETHTOOL_NAME); + + strcpy(info->version, DRV_MODULE_VERSION); + info->fw_version[0] = '\0'; + info->n_stats = STMMAC_STATS_LEN; + return; +} + +int stmmac_ethtool_getsettings(struct net_device *dev, struct ethtool_cmd *cmd) +{ + struct stmmac_priv *priv = netdev_priv(dev); + struct phy_device *phy = priv->phydev; + int rc; + if (phy == NULL) { + pr_err("%s: %s: PHY is not registered\n", + __func__, dev->name); + return -ENODEV; + } + if (!netif_running(dev)) { + pr_err("%s: interface is disabled: we cannot track " + "link speed / duplex setting\n", dev->name); + return -EBUSY; + } + cmd->transceiver = XCVR_INTERNAL; + spin_lock_irq(&priv->lock); + rc = phy_ethtool_gset(phy, cmd); + spin_unlock_irq(&priv->lock); + return rc; +} + +int 
stmmac_ethtool_setsettings(struct net_device *dev, struct ethtool_cmd *cmd) +{ + struct stmmac_priv *priv = netdev_priv(dev); + struct phy_device *phy = priv->phydev; + int rc; + + spin_lock(&priv->lock); + rc = phy_ethtool_sset(phy, cmd); + spin_unlock(&priv->lock); + + return rc; +} + +u32 stmmac_ethtool_getmsglevel(struct net_device *dev) +{ + struct stmmac_priv *priv = netdev_priv(dev); + return priv->msg_enable; +} + +void stmmac_ethtool_setmsglevel(struct net_device *dev, u32 level) +{ + struct stmmac_priv *priv = netdev_priv(dev); + priv->msg_enable = level; + +} + +int stmmac_check_if_running(struct net_device *dev) +{ + if (!netif_running(dev)) + return -EBUSY; + return 0; +} + +int stmmac_ethtool_get_regs_len(struct net_device *dev) +{ + return REG_SPACE_SIZE; +} + +void stmmac_ethtool_gregs(struct net_device *dev, + struct ethtool_regs *regs, void *space) +{ + int i; + u32 *reg_space = (u32 *) space; + + struct stmmac_priv *priv = netdev_priv(dev); + + memset(reg_space, 0x0, REG_SPACE_SIZE); + + if (!priv->is_gmac) { + /* MAC registers */ + for (i = 0; i < 12; i++) + reg_space[i] = readl(dev->base_addr + (i * 4)); + /* DMA registers */ + for (i = 0; i < 9; i++) + reg_space[i + 12] = + readl(dev->base_addr + (DMA_BUS_MODE + (i * 4))); + reg_space[22] = readl(dev->base_addr + DMA_CUR_TX_BUF_ADDR); + reg_space[23] = readl(dev->base_addr + DMA_CUR_RX_BUF_ADDR); + } else { + /* MAC registers */ + for (i = 0; i < 55; i++) + reg_space[i] = readl(dev->base_addr + (i * 4)); + /* DMA registers */ + for (i = 0; i < 22; i++) + reg_space[i + 55] = + readl(dev->base_addr + (DMA_BUS_MODE + (i * 4))); + } + + return; +} + +int stmmac_ethtool_set_tx_csum(struct net_device *netdev, u32 data) +{ + if (data) + netdev->features |= NETIF_F_HW_CSUM; + else + netdev->features &= ~NETIF_F_HW_CSUM; + + return 0; +} + +u32 stmmac_ethtool_get_rx_csum(struct net_device *dev) +{ + struct stmmac_priv *priv = netdev_priv(dev); + + return priv->rx_csum; +} + +static void +stmmac_get_pauseparam(struct net_device *netdev, + struct ethtool_pauseparam *pause) +{ + struct stmmac_priv *priv = netdev_priv(netdev); + + spin_lock(&priv->lock); + + pause->rx_pause = 0; + pause->tx_pause = 0; + pause->autoneg = priv->phydev->autoneg; + + if (priv->flow_ctrl & FLOW_RX) + pause->rx_pause = 1; + if (priv->flow_ctrl & FLOW_TX) + pause->tx_pause = 1; + + spin_unlock(&priv->lock); + return; +} + +static int +stmmac_set_pauseparam(struct net_device *netdev, + struct ethtool_pauseparam *pause) +{ + struct stmmac_priv *priv = netdev_priv(netdev); + struct phy_device *phy = priv->phydev; + int new_pause = FLOW_OFF; + int ret = 0; + + spin_lock(&priv->lock); + + if (pause->rx_pause) + new_pause |= FLOW_RX; + if (pause->tx_pause) + new_pause |= FLOW_TX; + + priv->flow_ctrl = new_pause; + + if (phy->autoneg) { + if (netif_running(netdev)) { + struct ethtool_cmd cmd; + /* auto-negotiation automatically restarted */ + cmd.cmd = ETHTOOL_NWAY_RST; + cmd.supported = phy->supported; + cmd.advertising = phy->advertising; + cmd.autoneg = phy->autoneg; + cmd.speed = phy->speed; + cmd.duplex = phy->duplex; + cmd.phy_address = phy->addr; + ret = phy_ethtool_sset(phy, &cmd); + } + } else { + unsigned long ioaddr = netdev->base_addr; + priv->mac_type->ops->flow_ctrl(ioaddr, phy->duplex, + priv->flow_ctrl, priv->pause); + } + spin_unlock(&priv->lock); + return ret; +} + +static void stmmac_get_ethtool_stats(struct net_device *dev, + struct ethtool_stats *dummy, u64 *data) +{ + struct stmmac_priv *priv = netdev_priv(dev); + unsigned long ioaddr = 
dev->base_addr; + int i; + + /* Update HW stats if supported */ + priv->mac_type->ops->dma_diagnostic_fr(&dev->stats, &priv->xstats, + ioaddr); + + for (i = 0; i < STMMAC_STATS_LEN; i++) { + char *p = (char *)priv + stmmac_gstrings_stats[i].stat_offset; + data[i] = (stmmac_gstrings_stats[i].sizeof_stat == + sizeof(u64)) ? (*(u64 *)p) : (*(u32 *)p); + } + + return; +} + +static int stmmac_get_sset_count(struct net_device *netdev, int sset) +{ + switch (sset) { + case ETH_SS_STATS: + return STMMAC_STATS_LEN; + default: + return -EOPNOTSUPP; + } +} + +static void stmmac_get_strings(struct net_device *dev, u32 stringset, u8 *data) +{ + int i; + u8 *p = data; + + switch (stringset) { + case ETH_SS_STATS: + for (i = 0; i < STMMAC_STATS_LEN; i++) { + memcpy(p, stmmac_gstrings_stats[i].stat_string, + ETH_GSTRING_LEN); + p += ETH_GSTRING_LEN; + } + break; + default: + WARN_ON(1); + break; + } + return; +} + +/* Currently only support WOL through Magic packet. */ +static void stmmac_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol) +{ + struct stmmac_priv *priv = netdev_priv(dev); + + spin_lock_irq(&priv->lock); + if (priv->wolenabled == PMT_SUPPORTED) { + wol->supported = WAKE_MAGIC; + wol->wolopts = priv->wolopts; + } + spin_unlock_irq(&priv->lock); +} + +static int stmmac_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol) +{ + struct stmmac_priv *priv = netdev_priv(dev); + u32 support = WAKE_MAGIC; + + if (priv->wolenabled == PMT_NOT_SUPPORTED) + return -EINVAL; + + if (wol->wolopts & ~support) + return -EINVAL; + + if (wol->wolopts == 0) + device_set_wakeup_enable(priv->device, 0); + else + device_set_wakeup_enable(priv->device, 1); + + spin_lock_irq(&priv->lock); + priv->wolopts = wol->wolopts; + spin_unlock_irq(&priv->lock); + + return 0; +} + +static struct ethtool_ops stmmac_ethtool_ops = { + .begin = stmmac_check_if_running, + .get_drvinfo = stmmac_ethtool_getdrvinfo, + .get_settings = stmmac_ethtool_getsettings, + .set_settings = stmmac_ethtool_setsettings, + .get_msglevel = stmmac_ethtool_getmsglevel, + .set_msglevel = stmmac_ethtool_setmsglevel, + .get_regs = stmmac_ethtool_gregs, + .get_regs_len = stmmac_ethtool_get_regs_len, + .get_link = ethtool_op_get_link, + .get_rx_csum = stmmac_ethtool_get_rx_csum, + .get_tx_csum = ethtool_op_get_tx_csum, + .set_tx_csum = stmmac_ethtool_set_tx_csum, + .get_sg = ethtool_op_get_sg, + .set_sg = ethtool_op_set_sg, + .get_pauseparam = stmmac_get_pauseparam, + .set_pauseparam = stmmac_set_pauseparam, + .get_ethtool_stats = stmmac_get_ethtool_stats, + .get_strings = stmmac_get_strings, + .get_wol = stmmac_get_wol, + .set_wol = stmmac_set_wol, + .get_sset_count = stmmac_get_sset_count, +#ifdef NETIF_F_TSO + .get_tso = ethtool_op_get_tso, + .set_tso = ethtool_op_set_tso, +#endif +}; + +void stmmac_set_ethtool_ops(struct net_device *netdev) +{ + SET_ETHTOOL_OPS(netdev, &stmmac_ethtool_ops); +} diff --git a/drivers/net/stmmac/stmmac_main.c b/drivers/net/stmmac/stmmac_main.c new file mode 100644 index 00000000000..c2f14dc9ba2 --- /dev/null +++ b/drivers/net/stmmac/stmmac_main.c @@ -0,0 +1,2204 @@ +/******************************************************************************* + This is the driver for the ST MAC 10/100/1000 on-chip Ethernet controllers. + ST Ethernet IPs are built around a Synopsys IP Core. 
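For reference, each STMMAC_STAT() entry in the ethtool table of stmmac_ethtool.c above pairs a printable name with the field's size and its offset inside struct stmmac_priv; written out by hand (a sketch, not text from the patch), the first entry expands roughly to:

	/* FIELD_SIZEOF(t, f) is sizeof(((t *)0)->f); both values are
	 * computed at compile time, nothing is dereferenced at run time. */
	{ "tx_underflow",
	  sizeof(((struct stmmac_extra_stats *)0)->tx_underflow),
	  offsetof(struct stmmac_priv, xstats.tx_underflow) },

This is what lets stmmac_get_ethtool_stats() walk the xstats fields generically with a char pointer plus stat_offset.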
+ + Copyright (C) 2007-2009 STMicroelectronics Ltd + + This program is free software; you can redistribute it and/or modify it + under the terms and conditions of the GNU General Public License, + version 2, as published by the Free Software Foundation. + + This program is distributed in the hope it will be useful, but WITHOUT + ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + more details. + + You should have received a copy of the GNU General Public License along with + this program; if not, write to the Free Software Foundation, Inc., + 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + + The full GNU General Public License is included in this distribution in + the file called "COPYING". + + Author: Giuseppe Cavallaro <peppe.cavallaro@st.com> + + Documentation available at: + http://www.stlinux.com + Support available at: + https://bugzilla.stlinux.com/ +*******************************************************************************/ + +#include <linux/module.h> +#include <linux/init.h> +#include <linux/kernel.h> +#include <linux/interrupt.h> +#include <linux/netdevice.h> +#include <linux/etherdevice.h> +#include <linux/platform_device.h> +#include <linux/ip.h> +#include <linux/tcp.h> +#include <linux/skbuff.h> +#include <linux/ethtool.h> +#include <linux/if_ether.h> +#include <linux/crc32.h> +#include <linux/mii.h> +#include <linux/phy.h> +#include <linux/if_vlan.h> +#include <linux/dma-mapping.h> +#include <linux/stm/soc.h> +#include "stmmac.h" + +#define STMMAC_RESOURCE_NAME "stmmaceth" +#define PHY_RESOURCE_NAME "stmmacphy" + +#undef STMMAC_DEBUG +/*#define STMMAC_DEBUG*/ +#ifdef STMMAC_DEBUG +#define DBG(nlevel, klevel, fmt, args...) \ + ((void)(netif_msg_##nlevel(priv) && \ + printk(KERN_##klevel fmt, ## args))) +#else +#define DBG(nlevel, klevel, fmt, args...) do { } while (0) +#endif + +#undef STMMAC_RX_DEBUG +/*#define STMMAC_RX_DEBUG*/ +#ifdef STMMAC_RX_DEBUG +#define RX_DBG(fmt, args...) printk(fmt, ## args) +#else +#define RX_DBG(fmt, args...) do { } while (0) +#endif + +#undef STMMAC_XMIT_DEBUG +/*#define STMMAC_XMIT_DEBUG*/ +#ifdef STMMAC_XMIT_DEBUG +#define TX_DBG(fmt, args...) printk(fmt, ## args) +#else +#define TX_DBG(fmt, args...)
do { } while (0) +#endif + +#define STMMAC_ALIGN(x) L1_CACHE_ALIGN(x) +#define JUMBO_LEN 9000 + +/* Module parameters */ +#define TX_TIMEO 5000 /* default 5 seconds */ +static int watchdog = TX_TIMEO; +module_param(watchdog, int, S_IRUGO | S_IWUSR); +MODULE_PARM_DESC(watchdog, "Transmit timeout in milliseconds"); + +static int debug = -1; /* -1: default, 0: no output, 16: all */ +module_param(debug, int, S_IRUGO | S_IWUSR); +MODULE_PARM_DESC(debug, "Message Level (0: no output, 16: all)"); + +static int phyaddr = -1; +module_param(phyaddr, int, S_IRUGO); +MODULE_PARM_DESC(phyaddr, "Physical device address"); + +#define DMA_TX_SIZE 256 +static int dma_txsize = DMA_TX_SIZE; +module_param(dma_txsize, int, S_IRUGO | S_IWUSR); +MODULE_PARM_DESC(dma_txsize, "Number of descriptors in the TX list"); + +#define DMA_RX_SIZE 256 +static int dma_rxsize = DMA_RX_SIZE; +module_param(dma_rxsize, int, S_IRUGO | S_IWUSR); +MODULE_PARM_DESC(dma_rxsize, "Number of descriptors in the RX list"); + +static int flow_ctrl = FLOW_OFF; +module_param(flow_ctrl, int, S_IRUGO | S_IWUSR); +MODULE_PARM_DESC(flow_ctrl, "Flow control ability [on/off]"); + +static int pause = PAUSE_TIME; +module_param(pause, int, S_IRUGO | S_IWUSR); +MODULE_PARM_DESC(pause, "Flow Control Pause Time"); + +#define TC_DEFAULT 64 +static int tc = TC_DEFAULT; +module_param(tc, int, S_IRUGO | S_IWUSR); +MODULE_PARM_DESC(tc, "DMA threshold control value"); + +#define RX_NO_COALESCE 1 /* Always interrupt on completion */ +#define TX_NO_COALESCE -1 /* No moderation by default */ + +/* Pay attention to tune this parameter; take care of both + * hardware capability and network stability/performance impact. + * Many tests showed that ~4ms latency seems to be good enough. */ +#ifdef CONFIG_STMMAC_TIMER +#define DEFAULT_PERIODIC_RATE 256 +static int tmrate = DEFAULT_PERIODIC_RATE; +module_param(tmrate, int, S_IRUGO | S_IWUSR); +MODULE_PARM_DESC(tmrate, "External timer freq. (default: 256Hz)"); +#endif + +#define DMA_BUFFER_SIZE BUF_SIZE_2KiB +static int buf_sz = DMA_BUFFER_SIZE; +module_param(buf_sz, int, S_IRUGO | S_IWUSR); +MODULE_PARM_DESC(buf_sz, "DMA buffer size"); + +/* In case of Giga ETH, we can enable/disable the COE for the + * transmit HW checksum computation. + * Note that, if tx csum is off in HW, SG will still be supported. */ +static int tx_coe = HW_CSUM; +module_param(tx_coe, int, S_IRUGO | S_IWUSR); +MODULE_PARM_DESC(tx_coe, "GMAC COE type 2 [on/off]"); + +static const u32 default_msg_level = (NETIF_MSG_DRV | NETIF_MSG_PROBE | + NETIF_MSG_LINK | NETIF_MSG_IFUP | + NETIF_MSG_IFDOWN | NETIF_MSG_TIMER); + +static irqreturn_t stmmac_interrupt(int irq, void *dev_id); +static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev); + +/** + * stmmac_verify_args - verify the driver parameters. + * Description: it verifies if some wrong parameter is passed to the driver. + * Note that wrong parameters are replaced with the default values.
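 * For example (illustrative values, against the defaults defined above):
 *   dma_rxsize = -8   ->  reset to DMA_RX_SIZE (256)
 *   buf_sz = 64       ->  below DMA_BUFFER_SIZE, reset to DMA_BUFFER_SIZE
 *   flow_ctrl = 3     ->  above 1, reset to FLOW_AUTO
 *   pause = 0x10000   ->  outside 0..0xffff, reset to PAUSE_TIME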
+ */ +static void stmmac_verify_args(void) +{ + if (unlikely(watchdog < 0)) + watchdog = TX_TIMEO; + if (unlikely(dma_rxsize < 0)) + dma_rxsize = DMA_RX_SIZE; + if (unlikely(dma_txsize < 0)) + dma_txsize = DMA_TX_SIZE; + if (unlikely((buf_sz < DMA_BUFFER_SIZE) || (buf_sz > BUF_SIZE_16KiB))) + buf_sz = DMA_BUFFER_SIZE; + if (unlikely(flow_ctrl > 1)) + flow_ctrl = FLOW_AUTO; + else if (likely(flow_ctrl < 0)) + flow_ctrl = FLOW_OFF; + if (unlikely((pause < 0) || (pause > 0xffff))) + pause = PAUSE_TIME; + + return; +} + +#if defined(STMMAC_XMIT_DEBUG) || defined(STMMAC_RX_DEBUG) +static void print_pkt(unsigned char *buf, int len) +{ + int j; + pr_info("len = %d byte, buf addr: 0x%p", len, buf); + for (j = 0; j < len; j++) { + if ((j % 16) == 0) + pr_info("\n %03x:", j); + pr_info(" %02x", buf[j]); + } + pr_info("\n"); + return; +} +#endif + +/* minimum number of free TX descriptors required to wake up TX process */ +#define STMMAC_TX_THRESH(x) (x->dma_tx_size/4) + +static inline u32 stmmac_tx_avail(struct stmmac_priv *priv) +{ + return priv->dirty_tx + priv->dma_tx_size - priv->cur_tx - 1; +} + +/** + * stmmac_adjust_link + * @dev: net device structure + * Description: it adjusts the link parameters. + */ +static void stmmac_adjust_link(struct net_device *dev) +{ + struct stmmac_priv *priv = netdev_priv(dev); + struct phy_device *phydev = priv->phydev; + unsigned long ioaddr = dev->base_addr; + unsigned long flags; + int new_state = 0; + unsigned int fc = priv->flow_ctrl, pause_time = priv->pause; + + if (phydev == NULL) + return; + + DBG(probe, DEBUG, "stmmac_adjust_link: called. address %d link %d\n", + phydev->addr, phydev->link); + + spin_lock_irqsave(&priv->lock, flags); + if (phydev->link) { + u32 ctrl = readl(ioaddr + MAC_CTRL_REG); + + /* Now we make sure that we can be in full duplex mode. + * If not, we operate in half-duplex mode. */ + if (phydev->duplex != priv->oldduplex) { + new_state = 1; + if (!(phydev->duplex)) + ctrl &= ~priv->mac_type->hw.link.duplex; + else + ctrl |= priv->mac_type->hw.link.duplex; + priv->oldduplex = phydev->duplex; + } + /* Flow Control operation */ + if (phydev->pause) + priv->mac_type->ops->flow_ctrl(ioaddr, phydev->duplex, + fc, pause_time); + + if (phydev->speed != priv->speed) { + new_state = 1; + switch (phydev->speed) { + case 1000: + if (likely(priv->is_gmac)) + ctrl &= ~priv->mac_type->hw.link.port; + break; + case 100: + case 10: + if (priv->is_gmac) { + ctrl |= priv->mac_type->hw.link.port; + if (phydev->speed == SPEED_100) { + ctrl |= + priv->mac_type->hw.link. + speed; + } else { + ctrl &= + ~(priv->mac_type->hw. + link.speed); + } + } else { + ctrl &= ~priv->mac_type->hw.link.port; + } + priv->fix_mac_speed(priv->bsp_priv, + phydev->speed); + break; + default: + if (netif_msg_link(priv)) + pr_warning("%s: Speed (%d) is not 10" + " or 100!\n", dev->name, phydev->speed); + break; + } + + priv->speed = phydev->speed; + } + + writel(ctrl, ioaddr + MAC_CTRL_REG); + + if (!priv->oldlink) { + new_state = 1; + priv->oldlink = 1; + } + } else if (priv->oldlink) { + new_state = 1; + priv->oldlink = 0; + priv->speed = 0; + priv->oldduplex = -1; + } + + if (new_state && netif_msg_link(priv)) + phy_print_status(phydev); + + spin_unlock_irqrestore(&priv->lock, flags); + + DBG(probe, DEBUG, "stmmac_adjust_link: exiting\n"); +} + +/** + * stmmac_init_phy - PHY initialization + * @dev: net device structure + * Description: it initializes the driver's PHY state, and attaches the PHY + * to the mac driver. 
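 * For example (illustrative): with bus_id = 1 and phy_addr = 4, the bus
 * string becomes "1" and PHY_ID_FMT yields the id "1:04", which
 * phy_connect() then resolves on the corresponding MDIO bus.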
+ * Return value: + * 0 on success + */ +static int stmmac_init_phy(struct net_device *dev) +{ + struct stmmac_priv *priv = netdev_priv(dev); + struct phy_device *phydev; + char phy_id[BUS_ID_SIZE]; /* PHY to connect */ + char bus_id[BUS_ID_SIZE]; + + priv->oldlink = 0; + priv->speed = 0; + priv->oldduplex = -1; + + if (priv->phy_addr == -1) { + /* We don't have a PHY, so do nothing */ + return 0; + } + + snprintf(bus_id, MII_BUS_ID_SIZE, "%x", priv->bus_id); + snprintf(phy_id, BUS_ID_SIZE, PHY_ID_FMT, bus_id, priv->phy_addr); + pr_debug("stmmac_init_phy: trying to attach to %s\n", phy_id); + + phydev = phy_connect(dev, phy_id, &stmmac_adjust_link, 0, + priv->phy_interface); + + if (IS_ERR(phydev)) { + pr_err("%s: Could not attach to PHY\n", dev->name); + return PTR_ERR(phydev); + } + + /* + * Broken HW is sometimes missing the pull-up resistor on the + * MDIO line, which results in reads to non-existent devices returning + * 0 rather than 0xffff. Catch this here and treat 0 as a non-existent + * device as well. + * Note: phydev->phy_id is the result of reading the UID PHY registers. + */ + if (phydev->phy_id == 0) { + phy_disconnect(phydev); + return -ENODEV; + } + pr_debug("stmmac_init_phy: %s: attached to PHY (UID 0x%x)" + " Link = %d\n", dev->name, phydev->phy_id, phydev->link); + + priv->phydev = phydev; + + return 0; +} + +static inline void stmmac_mac_enable_rx(unsigned long ioaddr) +{ + u32 value = readl(ioaddr + MAC_CTRL_REG); + value |= MAC_RNABLE_RX; + /* Set the RE (receive enable bit into the MAC CTRL register). */ + writel(value, ioaddr + MAC_CTRL_REG); +} + +static inline void stmmac_mac_enable_tx(unsigned long ioaddr) +{ + u32 value = readl(ioaddr + MAC_CTRL_REG); + value |= MAC_ENABLE_TX; + /* Set the TE (transmit enable bit into the MAC CTRL register). */ + writel(value, ioaddr + MAC_CTRL_REG); +} + +static inline void stmmac_mac_disable_rx(unsigned long ioaddr) +{ + u32 value = readl(ioaddr + MAC_CTRL_REG); + value &= ~MAC_RNABLE_RX; + writel(value, ioaddr + MAC_CTRL_REG); +} + +static inline void stmmac_mac_disable_tx(unsigned long ioaddr) +{ + u32 value = readl(ioaddr + MAC_CTRL_REG); + value &= ~MAC_ENABLE_TX; + writel(value, ioaddr + MAC_CTRL_REG); +} + +/** + * display_ring + * @p: pointer to the ring. + * @size: size of the ring. + * Description: display all the descriptors within the ring. + */ +static void display_ring(struct dma_desc *p, int size) +{ + struct tmp_s { + u64 a; + unsigned int b; + unsigned int c; + }; + int i; + for (i = 0; i < size; i++) { + struct tmp_s *x = (struct tmp_s *)(p + i); + pr_info("\t%d [0x%x]: DES0=0x%x DES1=0x%x BUF1=0x%x BUF2=0x%x", + i, (unsigned int)virt_to_phys(&p[i]), + (unsigned int)(x->a), (unsigned int)((x->a) >> 32), + x->b, x->c); + pr_info("\n"); + } +} + +/** + * init_dma_desc_rings - init the RX/TX descriptor rings + * @dev: net device structure + * Description: this function initializes the DMA RX/TX descriptors + * and allocates the socket buffers. + */ +static void init_dma_desc_rings(struct net_device *dev) +{ + int i; + struct stmmac_priv *priv = netdev_priv(dev); + struct sk_buff *skb; + unsigned int txsize = priv->dma_tx_size; + unsigned int rxsize = priv->dma_rx_size; + unsigned int bfsize = priv->dma_buf_sz; + int buff2_needed = 0; + int dis_ic = 0; + +#ifdef CONFIG_STMMAC_TIMER + /* Using Timers disable interrupts on completion for the reception */ + dis_ic = 1; +#endif + /* Set the Buffer size according to the MTU; + * indeed, in case of jumbo we need to bump-up the buffer sizes. 
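 * For example: a standard 1500-byte MTU keeps bfsize at DMA_BUFFER_SIZE
 * (2KiB); an mtu of 4500 selects BUF_SIZE_8KiB and a 9000-byte jumbo MTU
 * selects BUF_SIZE_16KiB. From 8KiB upwards the second buffer pointer
 * (des3) is also programmed, since one descriptor buffer alone cannot
 * cover the frame.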
+ */ + if (unlikely(dev->mtu >= BUF_SIZE_8KiB)) + bfsize = BUF_SIZE_16KiB; + else if (unlikely(dev->mtu >= BUF_SIZE_4KiB)) + bfsize = BUF_SIZE_8KiB; + else if (unlikely(dev->mtu >= BUF_SIZE_2KiB)) + bfsize = BUF_SIZE_4KiB; + else if (unlikely(dev->mtu >= DMA_BUFFER_SIZE)) + bfsize = BUF_SIZE_2KiB; + else + bfsize = DMA_BUFFER_SIZE; + + /* If the MTU exceeds 8k so use the second buffer in the chain */ + if (bfsize >= BUF_SIZE_8KiB) + buff2_needed = 1; + + DBG(probe, INFO, "stmmac: txsize %d, rxsize %d, bfsize %d\n", + txsize, rxsize, bfsize); + + priv->rx_skbuff_dma = kmalloc(rxsize * sizeof(dma_addr_t), GFP_KERNEL); + priv->rx_skbuff = + kmalloc(sizeof(struct sk_buff *) * rxsize, GFP_KERNEL); + priv->dma_rx = + (struct dma_desc *)dma_alloc_coherent(priv->device, + rxsize * + sizeof(struct dma_desc), + &priv->dma_rx_phy, + GFP_KERNEL); + priv->tx_skbuff = kmalloc(sizeof(struct sk_buff *) * txsize, + GFP_KERNEL); + priv->dma_tx = + (struct dma_desc *)dma_alloc_coherent(priv->device, + txsize * + sizeof(struct dma_desc), + &priv->dma_tx_phy, + GFP_KERNEL); + + if ((priv->dma_rx == NULL) || (priv->dma_tx == NULL)) { + pr_err("%s:ERROR allocating the DMA Tx/Rx desc\n", __func__); + return; + } + + DBG(probe, INFO, "stmmac (%s) DMA desc rings: virt addr (Rx %p, " + "Tx %p)\n\tDMA phy addr (Rx 0x%08x, Tx 0x%08x)\n", + dev->name, priv->dma_rx, priv->dma_tx, + (unsigned int)priv->dma_rx_phy, (unsigned int)priv->dma_tx_phy); + + /* RX INITIALIZATION */ + DBG(probe, INFO, "stmmac: SKB addresses:\n" + "skb\t\tskb data\tdma data\n"); + + for (i = 0; i < rxsize; i++) { + struct dma_desc *p = priv->dma_rx + i; + + skb = netdev_alloc_skb_ip_align(dev, bfsize); + if (unlikely(skb == NULL)) { + pr_err("%s: Rx init fails; skb is NULL\n", __func__); + break; + } + priv->rx_skbuff[i] = skb; + priv->rx_skbuff_dma[i] = dma_map_single(priv->device, skb->data, + bfsize, DMA_FROM_DEVICE); + + p->des2 = priv->rx_skbuff_dma[i]; + if (unlikely(buff2_needed)) + p->des3 = p->des2 + BUF_SIZE_8KiB; + DBG(probe, INFO, "[%p]\t[%p]\t[%x]\n", priv->rx_skbuff[i], + priv->rx_skbuff[i]->data, priv->rx_skbuff_dma[i]); + } + priv->cur_rx = 0; + priv->dirty_rx = (unsigned int)(i - rxsize); + priv->dma_buf_sz = bfsize; + buf_sz = bfsize; + + /* TX INITIALIZATION */ + for (i = 0; i < txsize; i++) { + priv->tx_skbuff[i] = NULL; + priv->dma_tx[i].des2 = 0; + } + priv->dirty_tx = 0; + priv->cur_tx = 0; + + /* Clear the Rx/Tx descriptors */ + priv->mac_type->ops->init_rx_desc(priv->dma_rx, rxsize, dis_ic); + priv->mac_type->ops->init_tx_desc(priv->dma_tx, txsize); + + if (netif_msg_hw(priv)) { + pr_info("RX descriptor ring:\n"); + display_ring(priv->dma_rx, rxsize); + pr_info("TX descriptor ring:\n"); + display_ring(priv->dma_tx, txsize); + } + return; +} + +static void dma_free_rx_skbufs(struct stmmac_priv *priv) +{ + int i; + + for (i = 0; i < priv->dma_rx_size; i++) { + if (priv->rx_skbuff[i]) { + dma_unmap_single(priv->device, priv->rx_skbuff_dma[i], + priv->dma_buf_sz, DMA_FROM_DEVICE); + dev_kfree_skb_any(priv->rx_skbuff[i]); + } + priv->rx_skbuff[i] = NULL; + } + return; +} + +static void dma_free_tx_skbufs(struct stmmac_priv *priv) +{ + int i; + + for (i = 0; i < priv->dma_tx_size; i++) { + if (priv->tx_skbuff[i] != NULL) { + struct dma_desc *p = priv->dma_tx + i; + if (p->des2) + dma_unmap_single(priv->device, p->des2, + priv->mac_type->ops->get_tx_len(p), + DMA_TO_DEVICE); + dev_kfree_skb_any(priv->tx_skbuff[i]); + priv->tx_skbuff[i] = NULL; + } + } + return; +} + +static void free_dma_desc_resources(struct stmmac_priv *priv) 
+{ + /* Release the DMA TX/RX socket buffers */ + dma_free_rx_skbufs(priv); + dma_free_tx_skbufs(priv); + + /* Free the region of consistent memory previously allocated for + * the DMA */ + dma_free_coherent(priv->device, + priv->dma_tx_size * sizeof(struct dma_desc), + priv->dma_tx, priv->dma_tx_phy); + dma_free_coherent(priv->device, + priv->dma_rx_size * sizeof(struct dma_desc), + priv->dma_rx, priv->dma_rx_phy); + kfree(priv->rx_skbuff_dma); + kfree(priv->rx_skbuff); + kfree(priv->tx_skbuff); + + return; +} + +/** + * stmmac_dma_start_tx + * @ioaddr: device I/O address + * Description: this function starts the DMA tx process. + */ +static void stmmac_dma_start_tx(unsigned long ioaddr) +{ + u32 value = readl(ioaddr + DMA_CONTROL); + value |= DMA_CONTROL_ST; + writel(value, ioaddr + DMA_CONTROL); + return; +} + +static void stmmac_dma_stop_tx(unsigned long ioaddr) +{ + u32 value = readl(ioaddr + DMA_CONTROL); + value &= ~DMA_CONTROL_ST; + writel(value, ioaddr + DMA_CONTROL); + return; +} + +/** + * stmmac_dma_start_rx + * @ioaddr: device I/O address + * Description: this function starts the DMA rx process. + */ +static void stmmac_dma_start_rx(unsigned long ioaddr) +{ + u32 value = readl(ioaddr + DMA_CONTROL); + value |= DMA_CONTROL_SR; + writel(value, ioaddr + DMA_CONTROL); + + return; +} + +static void stmmac_dma_stop_rx(unsigned long ioaddr) +{ + u32 value = readl(ioaddr + DMA_CONTROL); + value &= ~DMA_CONTROL_SR; + writel(value, ioaddr + DMA_CONTROL); + + return; +} + +/** + * stmmac_dma_operation_mode - HW DMA operation mode + * @priv : pointer to the private device structure. + * Description: it sets the DMA operation mode: tx/rx DMA thresholds + * or Store-And-Forward capability. It also verifies the COE for the + * transmission in case of Giga ETH. + */ +static void stmmac_dma_operation_mode(struct stmmac_priv *priv) +{ + if (!priv->is_gmac) { + /* MAC 10/100 */ + priv->mac_type->ops->dma_mode(priv->dev->base_addr, tc, 0); + priv->tx_coe = NO_HW_CSUM; + } else { + if ((priv->dev->mtu <= ETH_DATA_LEN) && (tx_coe)) { + priv->mac_type->ops->dma_mode(priv->dev->base_addr, + SF_DMA_MODE, SF_DMA_MODE); + tc = SF_DMA_MODE; + priv->tx_coe = HW_CSUM; + } else { + /* Checksum computation is performed in software. 
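 * (For example: on the GMAC with an MTU up to ETH_DATA_LEN and tx_coe
 * set, the DMA is switched to store-and-forward mode and HW_CSUM is
 * selected; every other combination, including the 10/100 core, keeps
 * the transmit path in threshold mode driven by the 'tc' parameter,
 * with checksumming left to software as noted here.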
*/ + priv->mac_type->ops->dma_mode(priv->dev->base_addr, tc, + SF_DMA_MODE); + priv->tx_coe = NO_HW_CSUM; + } + } + tx_coe = priv->tx_coe; + + return; +} + +#ifdef STMMAC_DEBUG +/** + * show_tx_process_state + * @status: tx descriptor status field + * Description: it shows the Transmit Process State for CSR5[22:20] + */ +static void show_tx_process_state(unsigned int status) +{ + unsigned int state; + state = (status & DMA_STATUS_TS_MASK) >> DMA_STATUS_TS_SHIFT; + + switch (state) { + case 0: + pr_info("- TX (Stopped): Reset or Stop command\n"); + break; + case 1: + pr_info("- TX (Running):Fetching the Tx desc\n"); + break; + case 2: + pr_info("- TX (Running): Waiting for end of tx\n"); + break; + case 3: + pr_info("- TX (Running): Reading the data " + "and queuing the data into the Tx buf\n"); + break; + case 6: + pr_info("- TX (Suspended): Tx Buff Underflow " + "or an unavailable Transmit descriptor\n"); + break; + case 7: + pr_info("- TX (Running): Closing Tx descriptor\n"); + break; + default: + break; + } + return; +} + +/** + * show_rx_process_state + * @status: rx descriptor status field + * Description: it shows the Receive Process State for CSR5[19:17] + */ +static void show_rx_process_state(unsigned int status) +{ + unsigned int state; + state = (status & DMA_STATUS_RS_MASK) >> DMA_STATUS_RS_SHIFT; + + switch (state) { + case 0: + pr_info("- RX (Stopped): Reset or Stop command\n"); + break; + case 1: + pr_info("- RX (Running): Fetching the Rx desc\n"); + break; + case 2: + pr_info("- RX (Running):Checking for end of pkt\n"); + break; + case 3: + pr_info("- RX (Running): Waiting for Rx pkt\n"); + break; + case 4: + pr_info("- RX (Suspended): Unavailable Rx buf\n"); + break; + case 5: + pr_info("- RX (Running): Closing Rx descriptor\n"); + break; + case 6: + pr_info("- RX(Running): Flushing the current frame" + " from the Rx buf\n"); + break; + case 7: + pr_info("- RX (Running): Queuing the Rx frame" + " from the Rx buf into memory\n"); + break; + default: + break; + } + return; +} +#endif + +/** + * stmmac_tx: + * @priv: private driver structure + * Description: it reclaims resources after transmission completes. + */ +static void stmmac_tx(struct stmmac_priv *priv) +{ + unsigned int txsize = priv->dma_tx_size; + unsigned long ioaddr = priv->dev->base_addr; + + while (priv->dirty_tx != priv->cur_tx) { + int last; + unsigned int entry = priv->dirty_tx % txsize; + struct sk_buff *skb = priv->tx_skbuff[entry]; + struct dma_desc *p = priv->dma_tx + entry; + + /* Check if the descriptor is owned by the DMA. */ + if (priv->mac_type->ops->get_tx_owner(p)) + break; + + /* Verify tx error by looking at the last segment */ + last = priv->mac_type->ops->get_tx_ls(p); + if (likely(last)) { + int tx_error = + priv->mac_type->ops->tx_status(&priv->dev->stats, + &priv->xstats, + p, ioaddr); + if (likely(tx_error == 0)) { + priv->dev->stats.tx_packets++; + priv->xstats.tx_pkt_n++; + } else + priv->dev->stats.tx_errors++; + } + TX_DBG("%s: curr %d, dirty %d\n", __func__, + priv->cur_tx, priv->dirty_tx); + + if (likely(p->des2)) + dma_unmap_single(priv->device, p->des2, + priv->mac_type->ops->get_tx_len(p), + DMA_TO_DEVICE); + if (unlikely(p->des3)) + p->des3 = 0; + + if (likely(skb != NULL)) { + /* + * If there's room in the queue (limit it to size) + * we add this skb back into the pool, + * if it's the right size. 
+ */ + if ((skb_queue_len(&priv->rx_recycle) < + priv->dma_rx_size) && + skb_recycle_check(skb, priv->dma_buf_sz)) + __skb_queue_head(&priv->rx_recycle, skb); + else + dev_kfree_skb(skb); + + priv->tx_skbuff[entry] = NULL; + } + + priv->mac_type->ops->release_tx_desc(p); + + entry = (++priv->dirty_tx) % txsize; + } + if (unlikely(netif_queue_stopped(priv->dev) && + stmmac_tx_avail(priv) > STMMAC_TX_THRESH(priv))) { + netif_tx_lock(priv->dev); + if (netif_queue_stopped(priv->dev) && + stmmac_tx_avail(priv) > STMMAC_TX_THRESH(priv)) { + TX_DBG("%s: restart transmit\n", __func__); + netif_wake_queue(priv->dev); + } + netif_tx_unlock(priv->dev); + } + return; +} + +static inline void stmmac_enable_irq(struct stmmac_priv *priv) +{ +#ifndef CONFIG_STMMAC_TIMER + writel(DMA_INTR_DEFAULT_MASK, priv->dev->base_addr + DMA_INTR_ENA); +#else + priv->tm->timer_start(tmrate); +#endif +} + +static inline void stmmac_disable_irq(struct stmmac_priv *priv) +{ +#ifndef CONFIG_STMMAC_TIMER + writel(0, priv->dev->base_addr + DMA_INTR_ENA); +#else + priv->tm->timer_stop(); +#endif +} + +static int stmmac_has_work(struct stmmac_priv *priv) +{ + unsigned int has_work = 0; + int rxret, tx_work = 0; + + rxret = priv->mac_type->ops->get_rx_owner(priv->dma_rx + + (priv->cur_rx % priv->dma_rx_size)); + + if (priv->dirty_tx != priv->cur_tx) + tx_work = 1; + + if (likely(!rxret || tx_work)) + has_work = 1; + + return has_work; +} + +static inline void _stmmac_schedule(struct stmmac_priv *priv) +{ + if (likely(stmmac_has_work(priv))) { + stmmac_disable_irq(priv); + napi_schedule(&priv->napi); + } +} + +#ifdef CONFIG_STMMAC_TIMER +void stmmac_schedule(struct net_device *dev) +{ + struct stmmac_priv *priv = netdev_priv(dev); + + priv->xstats.sched_timer_n++; + + _stmmac_schedule(priv); + + return; +} + +static void stmmac_no_timer_started(unsigned int x) +{; +}; + +static void stmmac_no_timer_stopped(void) +{; +}; +#endif + +/** + * stmmac_tx_err: + * @priv: pointer to the private device structure + * Description: it cleans the descriptors and restarts the transmission + * in case of errors. + */ +static void stmmac_tx_err(struct stmmac_priv *priv) +{ + netif_stop_queue(priv->dev); + + stmmac_dma_stop_tx(priv->dev->base_addr); + dma_free_tx_skbufs(priv); + priv->mac_type->ops->init_tx_desc(priv->dma_tx, priv->dma_tx_size); + priv->dirty_tx = 0; + priv->cur_tx = 0; + stmmac_dma_start_tx(priv->dev->base_addr); + + priv->dev->stats.tx_errors++; + netif_wake_queue(priv->dev); + + return; +} + +/** + * stmmac_dma_interrupt - Interrupt handler for the driver + * @dev: net device structure + * Description: Interrupt handler for the driver (DMA). 
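 * On a normal RX/TX interrupt the handler does not touch the rings
 * itself; _stmmac_schedule() applies the usual NAPI handoff:
 *
 *	stmmac_disable_irq(priv);	(no re-entry while polling)
 *	napi_schedule(&priv->napi);	(stmmac_poll() does the work)
 *
 * and stmmac_poll() re-enables interrupts only once less than a full
 * budget of packets remains.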
+ */ +static void stmmac_dma_interrupt(struct net_device *dev) +{ + unsigned long ioaddr = dev->base_addr; + struct stmmac_priv *priv = netdev_priv(dev); + /* read the status register (CSR5) */ + u32 intr_status = readl(ioaddr + DMA_STATUS); + + DBG(intr, INFO, "%s: [CSR5: 0x%08x]\n", __func__, intr_status); + +#ifdef STMMAC_DEBUG + /* It displays the DMA transmit process state (CSR5 register) */ + if (netif_msg_tx_done(priv)) + show_tx_process_state(intr_status); + if (netif_msg_rx_status(priv)) + show_rx_process_state(intr_status); +#endif + /* ABNORMAL interrupts */ + if (unlikely(intr_status & DMA_STATUS_AIS)) { + DBG(intr, INFO, "CSR5[15] DMA ABNORMAL IRQ: "); + if (unlikely(intr_status & DMA_STATUS_UNF)) { + DBG(intr, INFO, "transmit underflow\n"); + if (unlikely(tc != SF_DMA_MODE) + && (tc <= 256)) { + /* Try to bump up the threshold */ + tc += 64; + priv->mac_type->ops->dma_mode(ioaddr, tc, + SF_DMA_MODE); + priv->xstats.threshold = tc; + } + stmmac_tx_err(priv); + priv->xstats.tx_undeflow_irq++; + } + if (unlikely(intr_status & DMA_STATUS_TJT)) { + DBG(intr, INFO, "transmit jabber\n"); + priv->xstats.tx_jabber_irq++; + } + if (unlikely(intr_status & DMA_STATUS_OVF)) { + DBG(intr, INFO, "recv overflow\n"); + priv->xstats.rx_overflow_irq++; + } + if (unlikely(intr_status & DMA_STATUS_RU)) { + DBG(intr, INFO, "receive buffer unavailable\n"); + priv->xstats.rx_buf_unav_irq++; + } + if (unlikely(intr_status & DMA_STATUS_RPS)) { + DBG(intr, INFO, "receive process stopped\n"); + priv->xstats.rx_process_stopped_irq++; + } + if (unlikely(intr_status & DMA_STATUS_RWT)) { + DBG(intr, INFO, "receive watchdog\n"); + priv->xstats.rx_watchdog_irq++; + } + if (unlikely(intr_status & DMA_STATUS_ETI)) { + DBG(intr, INFO, "transmit early interrupt\n"); + priv->xstats.tx_early_irq++; + } + if (unlikely(intr_status & DMA_STATUS_TPS)) { + DBG(intr, INFO, "transmit process stopped\n"); + priv->xstats.tx_process_stopped_irq++; + stmmac_tx_err(priv); + } + if (unlikely(intr_status & DMA_STATUS_FBI)) { + DBG(intr, INFO, "fatal bus error\n"); + priv->xstats.fatal_bus_error_irq++; + stmmac_tx_err(priv); + } + } + + /* TX/RX NORMAL interrupts */ + if (intr_status & DMA_STATUS_NIS) { + priv->xstats.normal_irq_n++; + if (likely((intr_status & DMA_STATUS_RI) || + (intr_status & (DMA_STATUS_TI)))) + _stmmac_schedule(priv); + } + + /* Optional hardware blocks, interrupts should be disabled */ + if (unlikely(intr_status & + (DMA_STATUS_GPI | DMA_STATUS_GMI | DMA_STATUS_GLI))) + pr_info("%s: unexpected status %08x\n", __func__, intr_status); + + /* Clear the interrupt by writing a logic 1 to the CSR5[15-0] */ + writel((intr_status & 0x1ffff), ioaddr + DMA_STATUS); + + DBG(intr, INFO, "\n\n"); + + return; +} + +/** + * stmmac_open - open entry point of the driver + * @dev : pointer to the device structure. + * Description: + * This function is the open entry point of the driver. + * Return value: + * 0 on success and an appropriate (-)ve integer as defined in errno.h + * file on failure. + */ +static int stmmac_open(struct net_device *dev) +{ + struct stmmac_priv *priv = netdev_priv(dev); + unsigned long ioaddr = dev->base_addr; + int ret; + + /* Check that the MAC address is valid. If it's not, a random one + * is generated and a warning is logged.
A fixed address can be set + * with the following linux command: + * ifconfig eth0 hw ether xx:xx:xx:xx:xx:xx */ + if (!is_valid_ether_addr(dev->dev_addr)) { + random_ether_addr(dev->dev_addr); + pr_warning("%s: generated random MAC address %pM\n", dev->name, + dev->dev_addr); + } + + stmmac_verify_args(); + + ret = stmmac_init_phy(dev); + if (unlikely(ret)) { + pr_err("%s: Cannot attach to PHY (error: %d)\n", __func__, ret); + return ret; + } + + /* Request the IRQ lines */ + ret = request_irq(dev->irq, &stmmac_interrupt, + IRQF_SHARED, dev->name, dev); + if (unlikely(ret < 0)) { + pr_err("%s: ERROR: allocating the IRQ %d (error: %d)\n", + __func__, dev->irq, ret); + return ret; + } + +#ifdef CONFIG_STMMAC_TIMER + priv->tm = kmalloc(sizeof(struct stmmac_timer), GFP_KERNEL); + if (unlikely(priv->tm == NULL)) { + pr_err("%s: ERROR: timer memory alloc failed\n", __func__); + return -ENOMEM; + } + priv->tm->freq = tmrate; + + /* Test if the HW timer can actually be used. + * In case of failure continue with no timer. */ + if (unlikely((stmmac_open_ext_timer(dev, priv->tm)) < 0)) { + pr_warning("stmmaceth: cannot attach the HW timer\n"); + tmrate = 0; + priv->tm->freq = 0; + priv->tm->timer_start = stmmac_no_timer_started; + priv->tm->timer_stop = stmmac_no_timer_stopped; + } +#endif + + /* Create and initialize the TX/RX descriptors chains. */ + priv->dma_tx_size = STMMAC_ALIGN(dma_txsize); + priv->dma_rx_size = STMMAC_ALIGN(dma_rxsize); + priv->dma_buf_sz = STMMAC_ALIGN(buf_sz); + init_dma_desc_rings(dev); + + /* DMA initialization and SW reset */ + if (unlikely(priv->mac_type->ops->dma_init(ioaddr, + priv->pbl, priv->dma_tx_phy, priv->dma_rx_phy) < 0)) { + + pr_err("%s: DMA initialization failed\n", __func__); + return -1; + } + + /* Copy the MAC addr into the HW */ + priv->mac_type->ops->set_umac_addr(ioaddr, dev->dev_addr, 0); + /* Initialize the MAC Core */ + priv->mac_type->ops->core_init(ioaddr); + + priv->shutdown = 0; + + /* Initialise the MMC (if present) to disable all interrupts. */ + writel(0xffffffff, ioaddr + MMC_HIGH_INTR_MASK); + writel(0xffffffff, ioaddr + MMC_LOW_INTR_MASK); + + /* Enable the MAC Rx/Tx */ + stmmac_mac_enable_rx(ioaddr); + stmmac_mac_enable_tx(ioaddr); + + /* Set the HW DMA mode and the COE */ + stmmac_dma_operation_mode(priv); + + /* Extra statistics */ + memset(&priv->xstats, 0, sizeof(struct stmmac_extra_stats)); + priv->xstats.threshold = tc; + + /* Start the ball rolling... */ + DBG(probe, DEBUG, "%s: DMA RX/TX processes started...\n", dev->name); + stmmac_dma_start_tx(ioaddr); + stmmac_dma_start_rx(ioaddr); + +#ifdef CONFIG_STMMAC_TIMER + priv->tm->timer_start(tmrate); +#endif + /* Dump DMA/MAC registers */ + if (netif_msg_hw(priv)) { + priv->mac_type->ops->dump_mac_regs(ioaddr); + priv->mac_type->ops->dump_dma_regs(ioaddr); + } + + if (priv->phydev) + phy_start(priv->phydev); + + napi_enable(&priv->napi); + skb_queue_head_init(&priv->rx_recycle); + netif_start_queue(dev); + return 0; +} + +/** + * stmmac_release - close entry point of the driver + * @dev : device pointer. + * Description: + * This is the stop entry point of the driver.
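 * Teardown mirrors stmmac_open() in reverse: stop and disconnect the
 * PHY, stop the queue, release the optional external timer, disable
 * NAPI and drain the recycle list, free the IRQ, stop both DMA
 * channels, release the descriptor resources and finally disable the
 * MAC core.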
+ */ +static int stmmac_release(struct net_device *dev) +{ + struct stmmac_priv *priv = netdev_priv(dev); + + /* Stop and disconnect the PHY */ + if (priv->phydev) { + phy_stop(priv->phydev); + phy_disconnect(priv->phydev); + priv->phydev = NULL; + } + + netif_stop_queue(dev); + +#ifdef CONFIG_STMMAC_TIMER + /* Stop and release the timer */ + stmmac_close_ext_timer(); + if (priv->tm != NULL) + kfree(priv->tm); +#endif + napi_disable(&priv->napi); + skb_queue_purge(&priv->rx_recycle); + + /* Free the IRQ lines */ + free_irq(dev->irq, dev); + + /* Stop TX/RX DMA and clear the descriptors */ + stmmac_dma_stop_tx(dev->base_addr); + stmmac_dma_stop_rx(dev->base_addr); + + /* Release and free the Rx/Tx resources */ + free_dma_desc_resources(priv); + + /* Disable the MAC core */ + stmmac_mac_disable_tx(dev->base_addr); + stmmac_mac_disable_rx(dev->base_addr); + + netif_carrier_off(dev); + + return 0; +} + +/* + * To perform emulated hardware segmentation on skb. + */ +static int stmmac_sw_tso(struct stmmac_priv *priv, struct sk_buff *skb) +{ + struct sk_buff *segs, *curr_skb; + int gso_segs = skb_shinfo(skb)->gso_segs; + + /* Estimate the number of fragments in the worst case */ + if (unlikely(stmmac_tx_avail(priv) < gso_segs)) { + netif_stop_queue(priv->dev); + TX_DBG(KERN_ERR "%s: TSO BUG! Tx Ring full when queue awake\n", + __func__); + if (stmmac_tx_avail(priv) < gso_segs) + return NETDEV_TX_BUSY; + + netif_wake_queue(priv->dev); + } + TX_DBG("\tstmmac_sw_tso: segmenting: skb %p (len %d)\n", + skb, skb->len); + + segs = skb_gso_segment(skb, priv->dev->features & ~NETIF_F_TSO); + if (unlikely(IS_ERR(segs))) + goto sw_tso_end; + + do { + curr_skb = segs; + segs = segs->next; + TX_DBG("\t\tcurrent skb->len: %d, *curr %p," + "*next %p\n", curr_skb->len, curr_skb, segs); + curr_skb->next = NULL; + stmmac_xmit(curr_skb, priv->dev); + } while (segs); + +sw_tso_end: + dev_kfree_skb(skb); + + return NETDEV_TX_OK; +} + +static unsigned int stmmac_handle_jumbo_frames(struct sk_buff *skb, + struct net_device *dev, + int csum_insertion) +{ + struct stmmac_priv *priv = netdev_priv(dev); + unsigned int nopaged_len = skb_headlen(skb); + unsigned int txsize = priv->dma_tx_size; + unsigned int entry = priv->cur_tx % txsize; + struct dma_desc *desc = priv->dma_tx + entry; + + if (nopaged_len > BUF_SIZE_8KiB) { + + int buf2_size = nopaged_len - BUF_SIZE_8KiB; + + desc->des2 = dma_map_single(priv->device, skb->data, + BUF_SIZE_8KiB, DMA_TO_DEVICE); + desc->des3 = desc->des2 + BUF_SIZE_4KiB; + priv->mac_type->ops->prepare_tx_desc(desc, 1, BUF_SIZE_8KiB, + csum_insertion); + + entry = (++priv->cur_tx) % txsize; + desc = priv->dma_tx + entry; + + desc->des2 = dma_map_single(priv->device, + skb->data + BUF_SIZE_8KiB, + buf2_size, DMA_TO_DEVICE); + desc->des3 = desc->des2 + BUF_SIZE_4KiB; + priv->mac_type->ops->prepare_tx_desc(desc, 0, + buf2_size, csum_insertion); + priv->mac_type->ops->set_tx_owner(desc); + priv->tx_skbuff[entry] = NULL; + } else { + desc->des2 = dma_map_single(priv->device, skb->data, + nopaged_len, DMA_TO_DEVICE); + desc->des3 = desc->des2 + BUF_SIZE_4KiB; + priv->mac_type->ops->prepare_tx_desc(desc, 1, nopaged_len, + csum_insertion); + } + return entry; +} + +/** + * stmmac_xmit: + * @skb : the socket buffer + * @dev : device pointer + * Description : Tx entry point of the driver. 
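 * Descriptor budgeting in stmmac_xmit() below: a linear skb takes one
 * descriptor and each paged fragment one more, so nfrags + 1 free slots
 * are required on entry; frames of BUF_SIZE_4KiB or more take the
 * two-buffer jumbo path. The queue is stopped again once no more than
 * MAX_SKB_FRAGS + 1 slots remain.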
+ */ +static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev) +{ + struct stmmac_priv *priv = netdev_priv(dev); + unsigned int txsize = priv->dma_tx_size; + unsigned int entry; + int i, csum_insertion = 0; + int nfrags = skb_shinfo(skb)->nr_frags; + struct dma_desc *desc, *first; + + if (unlikely(stmmac_tx_avail(priv) < nfrags + 1)) { + if (!netif_queue_stopped(dev)) { + netif_stop_queue(dev); + /* This is a hard error, log it. */ + pr_err("%s: BUG! Tx Ring full when queue awake\n", + __func__); + } + return NETDEV_TX_BUSY; + } + + entry = priv->cur_tx % txsize; + +#ifdef STMMAC_XMIT_DEBUG + if ((skb->len > ETH_FRAME_LEN) || nfrags) + pr_info("stmmac xmit:\n" + "\tskb addr %p - len: %d - nopaged_len: %d\n" + "\tn_frags: %d - ip_summed: %d - %s gso\n", + skb, skb->len, skb_headlen(skb), nfrags, skb->ip_summed, + !skb_is_gso(skb) ? "isn't" : "is"); +#endif + + if (unlikely(skb_is_gso(skb))) + return stmmac_sw_tso(priv, skb); + + if (likely((skb->ip_summed == CHECKSUM_PARTIAL))) { + if (likely(priv->tx_coe == NO_HW_CSUM)) + skb_checksum_help(skb); + else + csum_insertion = 1; + } + + desc = priv->dma_tx + entry; + first = desc; + +#ifdef STMMAC_XMIT_DEBUG + if ((nfrags > 0) || (skb->len > ETH_FRAME_LEN)) + pr_debug("stmmac xmit: skb len: %d, nopaged_len: %d,\n" + "\t\tn_frags: %d, ip_summed: %d\n", + skb->len, skb_headlen(skb), nfrags, skb->ip_summed); +#endif + priv->tx_skbuff[entry] = skb; + if (unlikely(skb->len >= BUF_SIZE_4KiB)) { + entry = stmmac_handle_jumbo_frames(skb, dev, csum_insertion); + desc = priv->dma_tx + entry; + } else { + unsigned int nopaged_len = skb_headlen(skb); + desc->des2 = dma_map_single(priv->device, skb->data, + nopaged_len, DMA_TO_DEVICE); + priv->mac_type->ops->prepare_tx_desc(desc, 1, nopaged_len, + csum_insertion); + } + + for (i = 0; i < nfrags; i++) { + skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; + int len = frag->size; + + entry = (++priv->cur_tx) % txsize; + desc = priv->dma_tx + entry; + + TX_DBG("\t[entry %d] segment len: %d\n", entry, len); + desc->des2 = dma_map_page(priv->device, frag->page, + frag->page_offset, + len, DMA_TO_DEVICE); + priv->tx_skbuff[entry] = NULL; + priv->mac_type->ops->prepare_tx_desc(desc, 0, len, + csum_insertion); + priv->mac_type->ops->set_tx_owner(desc); + } + + /* Interrupt on completion only for the last segment */ + priv->mac_type->ops->close_tx_desc(desc); +#ifdef CONFIG_STMMAC_TIMER + /* Clean IC while using timers */ + priv->mac_type->ops->clear_tx_ic(desc); +#endif + /* To avoid a race condition */ + priv->mac_type->ops->set_tx_owner(first); + + priv->cur_tx++; + +#ifdef STMMAC_XMIT_DEBUG + if (netif_msg_pktdata(priv)) { + pr_info("stmmac xmit: current=%d, dirty=%d, entry=%d, " + "first=%p, nfrags=%d\n", + (priv->cur_tx % txsize), (priv->dirty_tx % txsize), + entry, first, nfrags); + display_ring(priv->dma_tx, txsize); + pr_info(">>> frame to be transmitted: "); + print_pkt(skb->data, skb->len); + } +#endif + if (unlikely(stmmac_tx_avail(priv) <= (MAX_SKB_FRAGS + 1))) { + TX_DBG("%s: stop transmitting packets\n", __func__); + netif_stop_queue(dev); + } + + dev->stats.tx_bytes += skb->len; + + /* CSR1 enables the transmit DMA to check for new descriptor */ + writel(1, dev->base_addr + DMA_XMT_POLL_DEMAND); + + return NETDEV_TX_OK; +} + +static inline void stmmac_rx_refill(struct stmmac_priv *priv) +{ + unsigned int rxsize = priv->dma_rx_size; + int bfsize = priv->dma_buf_sz; + struct dma_desc *p = priv->dma_rx; + + for (; priv->cur_rx - priv->dirty_rx > 0; priv->dirty_rx++) { + unsigned int
entry = priv->dirty_rx % rxsize; + if (likely(priv->rx_skbuff[entry] == NULL)) { + struct sk_buff *skb; + + skb = __skb_dequeue(&priv->rx_recycle); + if (skb == NULL) + skb = netdev_alloc_skb_ip_align(priv->dev, + bfsize); + + if (unlikely(skb == NULL)) + break; + + priv->rx_skbuff[entry] = skb; + priv->rx_skbuff_dma[entry] = + dma_map_single(priv->device, skb->data, bfsize, + DMA_FROM_DEVICE); + + (p + entry)->des2 = priv->rx_skbuff_dma[entry]; + if (unlikely(priv->is_gmac)) { + if (bfsize >= BUF_SIZE_8KiB) + (p + entry)->des3 = + (p + entry)->des2 + BUF_SIZE_8KiB; + } + RX_DBG(KERN_INFO "\trefill entry #%d\n", entry); + } + priv->mac_type->ops->set_rx_owner(p + entry); + } + return; +} + +static int stmmac_rx(struct stmmac_priv *priv, int limit) +{ + unsigned int rxsize = priv->dma_rx_size; + unsigned int entry = priv->cur_rx % rxsize; + unsigned int next_entry; + unsigned int count = 0; + struct dma_desc *p = priv->dma_rx + entry; + struct dma_desc *p_next; + +#ifdef STMMAC_RX_DEBUG + if (netif_msg_hw(priv)) { + pr_debug(">>> stmmac_rx: descriptor ring:\n"); + display_ring(priv->dma_rx, rxsize); + } +#endif + count = 0; + while (!priv->mac_type->ops->get_rx_owner(p)) { + int status; + + if (count >= limit) + break; + + count++; + + next_entry = (++priv->cur_rx) % rxsize; + p_next = priv->dma_rx + next_entry; + prefetch(p_next); + + /* read the status of the incoming frame */ + status = (priv->mac_type->ops->rx_status(&priv->dev->stats, + &priv->xstats, p)); + if (unlikely(status == discard_frame)) + priv->dev->stats.rx_errors++; + else { + struct sk_buff *skb; + /* Length should omit the CRC */ + int frame_len = + priv->mac_type->ops->get_rx_frame_len(p) - 4; + +#ifdef STMMAC_RX_DEBUG + if (frame_len > ETH_FRAME_LEN) + pr_debug("\tRX frame size %d, COE status: %d\n", + frame_len, status); + + if (netif_msg_hw(priv)) + pr_debug("\tdesc: %p [entry %d] buff=0x%x\n", + p, entry, p->des2); +#endif + skb = priv->rx_skbuff[entry]; + if (unlikely(!skb)) { + pr_err("%s: Inconsistent Rx descriptor chain\n", + priv->dev->name); + priv->dev->stats.rx_dropped++; + break; + } + prefetch(skb->data - NET_IP_ALIGN); + priv->rx_skbuff[entry] = NULL; + + skb_put(skb, frame_len); + dma_unmap_single(priv->device, + priv->rx_skbuff_dma[entry], + priv->dma_buf_sz, DMA_FROM_DEVICE); +#ifdef STMMAC_RX_DEBUG + if (netif_msg_pktdata(priv)) { + pr_info(" frame received (%d bytes)", frame_len); + print_pkt(skb->data, frame_len); + } +#endif + skb->protocol = eth_type_trans(skb, priv->dev); + + if (unlikely(status == csum_none)) { + /* always for the old mac 10/100 */ + skb->ip_summed = CHECKSUM_NONE; + netif_receive_skb(skb); + } else { + skb->ip_summed = CHECKSUM_UNNECESSARY; + napi_gro_receive(&priv->napi, skb); + } + + priv->dev->stats.rx_packets++; + priv->dev->stats.rx_bytes += frame_len; + priv->dev->last_rx = jiffies; + } + entry = next_entry; + p = p_next; /* use prefetched values */ + } + + stmmac_rx_refill(priv); + + priv->xstats.rx_pkt_n += count; + + return count; +} + +/** + * stmmac_poll - stmmac poll method (NAPI) + * @napi : pointer to the napi structure. + * @budget : maximum number of packets that the current CPU can receive from + * all interfaces. + * Description : + * This function implements the reception process.
+
+/**
+ * stmmac_poll - stmmac poll method (NAPI)
+ * @napi : pointer to the napi structure.
+ * @budget : maximum number of packets that the current CPU can receive from
+ *	     all interfaces.
+ * Description :
+ *  This function implements the reception process.
+ *  It also runs the TX completion thread.
+ */
+static int stmmac_poll(struct napi_struct *napi, int budget)
+{
+	struct stmmac_priv *priv = container_of(napi, struct stmmac_priv, napi);
+	int work_done = 0;
+
+	priv->xstats.poll_n++;
+	stmmac_tx(priv);
+	work_done = stmmac_rx(priv, budget);
+
+	if (work_done < budget) {
+		napi_complete(napi);
+		stmmac_enable_irq(priv);
+	}
+	return work_done;
+}
+
+/**
+ * stmmac_tx_timeout
+ * @dev : Pointer to net device structure
+ * Description: this function is called when a packet transmission fails to
+ *   complete within a reasonable time. The driver will mark the error in the
+ *   netdev structure and arrange for the device to be reset to a sane state
+ *   in order to transmit a new packet.
+ */
+static void stmmac_tx_timeout(struct net_device *dev)
+{
+	struct stmmac_priv *priv = netdev_priv(dev);
+
+	/* Clear Tx resources and restart transmitting again */
+	stmmac_tx_err(priv);
+	return;
+}
+
+/* Configuration changes (passed on by ifconfig) */
+static int stmmac_config(struct net_device *dev, struct ifmap *map)
+{
+	if (dev->flags & IFF_UP)	/* can't act on a running interface */
+		return -EBUSY;
+
+	/* Don't allow changing the I/O address */
+	if (map->base_addr != dev->base_addr) {
+		pr_warning("%s: can't change I/O address\n", dev->name);
+		return -EOPNOTSUPP;
+	}
+
+	/* Don't allow changing the IRQ */
+	if (map->irq != dev->irq) {
+		pr_warning("%s: can't change IRQ number %d\n",
+			dev->name, dev->irq);
+		return -EOPNOTSUPP;
+	}
+
+	/* ignore other fields */
+	return 0;
+}
+
+/**
+ * stmmac_multicast_list - entry point for multicast addressing
+ * @dev : pointer to the device structure
+ * Description:
+ * This function is a driver entry point which gets called by the kernel
+ * whenever multicast addresses must be enabled/disabled.
+ * Return value:
+ * void.
+ */
+static void stmmac_multicast_list(struct net_device *dev)
+{
+	struct stmmac_priv *priv = netdev_priv(dev);
+
+	spin_lock(&priv->lock);
+	priv->mac_type->ops->set_filter(dev);
+	spin_unlock(&priv->lock);
+	return;
+}
+
+/**
+ * stmmac_change_mtu - entry point to change MTU size for the device.
+ * @dev : device pointer.
+ * @new_mtu : the new MTU size for the device.
+ * Description: the Maximum Transmission Unit (MTU) is used by the network layer
+ * to drive packet transmission. Ethernet has an MTU of 1500 octets
+ * (ETH_DATA_LEN). This value can be changed with ifconfig.
+ * Return value:
+ * 0 on success and an appropriate negative integer as defined in errno.h
+ * on failure.
+ */
+static int stmmac_change_mtu(struct net_device *dev, int new_mtu)
+{
+	struct stmmac_priv *priv = netdev_priv(dev);
+	int max_mtu;
+
+	if (netif_running(dev)) {
+		pr_err("%s: must be stopped to change its MTU\n", dev->name);
+		return -EBUSY;
+	}
+
+	if (priv->is_gmac)
+		max_mtu = JUMBO_LEN;
+	else
+		max_mtu = ETH_DATA_LEN;
+
+	if ((new_mtu < 46) || (new_mtu > max_mtu)) {
+		pr_err("%s: invalid MTU, max MTU is: %d\n", dev->name, max_mtu);
+		return -EINVAL;
+	}
+
+	dev->mtu = new_mtu;
+
+	return 0;
+}
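The bounds enforced above depend only on the MAC core in use: 46 bytes minimum, ETH_DATA_LEN for the 10/100 core or the jumbo limit for the GMAC. A quick sketch of the same validation logic in isolation (constants assumed to match the driver; JUMBO_LEN is taken here as 9000):

#include <stdio.h>

#define ETH_DATA_LEN 1500
#define JUMBO_LEN    9000	/* assumed GMAC jumbo limit, as in the driver */

/* Mirror of the driver's MTU check: 46 <= mtu <= max for the MAC core. */
static int mtu_valid(int new_mtu, int is_gmac)
{
	int max_mtu = is_gmac ? JUMBO_LEN : ETH_DATA_LEN;

	return new_mtu >= 46 && new_mtu <= max_mtu;
}

int main(void)
{
	printf("7000 on GMAC: %s\n", mtu_valid(7000, 1) ? "ok" : "rejected");
	printf("7000 on MAC 10/100: %s\n", mtu_valid(7000, 0) ? "ok" : "rejected");
	return 0;
}

From user space the interface must be brought down first, e.g. `ip link set eth0 down && ip link set eth0 mtu 7000`; otherwise the driver returns -EBUSY.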
+
+static irqreturn_t stmmac_interrupt(int irq, void *dev_id)
+{
+	struct net_device *dev = (struct net_device *)dev_id;
+	struct stmmac_priv *priv = netdev_priv(dev);
+
+	if (unlikely(!dev)) {
+		pr_err("%s: invalid dev pointer\n", __func__);
+		return IRQ_NONE;
+	}
+
+	if (priv->is_gmac) {
+		unsigned long ioaddr = dev->base_addr;
+		/* To handle GMAC own interrupts */
+		priv->mac_type->ops->host_irq_status(ioaddr);
+	}
+	stmmac_dma_interrupt(dev);
+
+	return IRQ_HANDLED;
+}
+
+#ifdef CONFIG_NET_POLL_CONTROLLER
+/* Polling receive - used by NETCONSOLE and other diagnostic tools
+ * to allow network I/O with interrupts disabled. */
+static void stmmac_poll_controller(struct net_device *dev)
+{
+	disable_irq(dev->irq);
+	stmmac_interrupt(dev->irq, dev);
+	enable_irq(dev->irq);
+}
+#endif
+
+/**
+ * stmmac_ioctl - Entry point for the Ioctl
+ * @dev: Device pointer.
+ * @rq: An IOCTL specific structure, that can contain a pointer to
+ * a proprietary structure used to pass information to the driver.
+ * @cmd: IOCTL command
+ * Description:
+ * Currently no special functionality is supported in IOCTL; only
+ * phy_mii_ioctl(...) can be invoked.
+ */
+static int stmmac_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
+{
+	struct stmmac_priv *priv = netdev_priv(dev);
+	int ret = -EOPNOTSUPP;
+
+	if (!netif_running(dev))
+		return -EINVAL;
+
+	switch (cmd) {
+	case SIOCGMIIPHY:
+	case SIOCGMIIREG:
+	case SIOCSMIIREG:
+		if (!priv->phydev)
+			return -EINVAL;
+
+		spin_lock(&priv->lock);
+		ret = phy_mii_ioctl(priv->phydev, if_mii(rq), cmd);
+		spin_unlock(&priv->lock);
+	default:
+		break;
+	}
+	return ret;
+}
+
+#ifdef STMMAC_VLAN_TAG_USED
+static void stmmac_vlan_rx_register(struct net_device *dev,
+				    struct vlan_group *grp)
+{
+	struct stmmac_priv *priv = netdev_priv(dev);
+
+	DBG(probe, INFO, "%s: Setting vlgrp to %p\n", dev->name, grp);
+
+	spin_lock(&priv->lock);
+	priv->vlgrp = grp;
+	spin_unlock(&priv->lock);
+
+	return;
+}
+#endif
+
+static const struct net_device_ops stmmac_netdev_ops = {
+	.ndo_open = stmmac_open,
+	.ndo_start_xmit = stmmac_xmit,
+	.ndo_stop = stmmac_release,
+	.ndo_change_mtu = stmmac_change_mtu,
+	.ndo_set_multicast_list = stmmac_multicast_list,
+	.ndo_tx_timeout = stmmac_tx_timeout,
+	.ndo_do_ioctl = stmmac_ioctl,
+	.ndo_set_config = stmmac_config,
+#ifdef STMMAC_VLAN_TAG_USED
+	.ndo_vlan_rx_register = stmmac_vlan_rx_register,
+#endif
+#ifdef CONFIG_NET_POLL_CONTROLLER
+	.ndo_poll_controller = stmmac_poll_controller,
+#endif
+	.ndo_set_mac_address = eth_mac_addr,
+};
+
+/**
+ * stmmac_probe - Initialization of the adapter.
+ * @dev : device pointer
+ * Description: The function initializes the network device structure for
+ * the STMMAC driver. It also calls the low level routines
+ * in order to init the HW (i.e.
the DMA engine) + */ +static int stmmac_probe(struct net_device *dev) +{ + int ret = 0; + struct stmmac_priv *priv = netdev_priv(dev); + + ether_setup(dev); + + dev->netdev_ops = &stmmac_netdev_ops; + stmmac_set_ethtool_ops(dev); + + dev->features |= (NETIF_F_SG | NETIF_F_HW_CSUM | NETIF_F_HIGHDMA); + dev->watchdog_timeo = msecs_to_jiffies(watchdog); +#ifdef STMMAC_VLAN_TAG_USED + /* Both mac100 and gmac support receive VLAN tag detection */ + dev->features |= NETIF_F_HW_VLAN_RX; +#endif + priv->msg_enable = netif_msg_init(debug, default_msg_level); + + if (priv->is_gmac) + priv->rx_csum = 1; + + if (flow_ctrl) + priv->flow_ctrl = FLOW_AUTO; /* RX/TX pause on */ + + priv->pause = pause; + netif_napi_add(dev, &priv->napi, stmmac_poll, 64); + + /* Get the MAC address */ + priv->mac_type->ops->get_umac_addr(dev->base_addr, dev->dev_addr, 0); + + if (!is_valid_ether_addr(dev->dev_addr)) + pr_warning("\tno valid MAC address;" + "please, use ifconfig or nwhwconfig!\n"); + + ret = register_netdev(dev); + if (ret) { + pr_err("%s: ERROR %i registering the device\n", + __func__, ret); + return -ENODEV; + } + + DBG(probe, DEBUG, "%s: Scatter/Gather: %s - HW checksums: %s\n", + dev->name, (dev->features & NETIF_F_SG) ? "on" : "off", + (dev->features & NETIF_F_HW_CSUM) ? "on" : "off"); + + spin_lock_init(&priv->lock); + + return ret; +} + +/** + * stmmac_mac_device_setup + * @dev : device pointer + * Description: select and initialise the mac device (mac100 or Gmac). + */ +static int stmmac_mac_device_setup(struct net_device *dev) +{ + struct stmmac_priv *priv = netdev_priv(dev); + unsigned long ioaddr = dev->base_addr; + + struct mac_device_info *device; + + if (priv->is_gmac) + device = gmac_setup(ioaddr); + else + device = mac100_setup(ioaddr); + + if (!device) + return -ENOMEM; + + priv->mac_type = device; + + priv->wolenabled = priv->mac_type->hw.pmt; /* PMT supported */ + if (priv->wolenabled == PMT_SUPPORTED) + priv->wolopts = WAKE_MAGIC; /* Magic Frame */ + + return 0; +} + +static int stmmacphy_dvr_probe(struct platform_device *pdev) +{ + struct plat_stmmacphy_data *plat_dat; + plat_dat = (struct plat_stmmacphy_data *)((pdev->dev).platform_data); + + pr_debug("stmmacphy_dvr_probe: added phy for bus %d\n", + plat_dat->bus_id); + + return 0; +} + +static int stmmacphy_dvr_remove(struct platform_device *pdev) +{ + return 0; +} + +static struct platform_driver stmmacphy_driver = { + .driver = { + .name = PHY_RESOURCE_NAME, + }, + .probe = stmmacphy_dvr_probe, + .remove = stmmacphy_dvr_remove, +}; + +/** + * stmmac_associate_phy + * @dev: pointer to device structure + * @data: points to the private structure. + * Description: Scans through all the PHYs we have registered and checks if + * any are associated with our MAC. If so, then just fill in + * the blanks in our local context structure + */ +static int stmmac_associate_phy(struct device *dev, void *data) +{ + struct stmmac_priv *priv = (struct stmmac_priv *)data; + struct plat_stmmacphy_data *plat_dat; + + plat_dat = (struct plat_stmmacphy_data *)(dev->platform_data); + + DBG(probe, DEBUG, "%s: checking phy for bus %d\n", __func__, + plat_dat->bus_id); + + /* Check that this phy is for the MAC being initialised */ + if (priv->bus_id != plat_dat->bus_id) + return 0; + + /* OK, this PHY is connected to the MAC. + Go ahead and get the parameters */ + DBG(probe, DEBUG, "%s: OK. 
Found PHY config\n", __func__); + priv->phy_irq = + platform_get_irq_byname(to_platform_device(dev), "phyirq"); + DBG(probe, DEBUG, "%s: PHY irq on bus %d is %d\n", __func__, + plat_dat->bus_id, priv->phy_irq); + + /* Override with kernel parameters if supplied XXX CRS XXX + * this needs to have multiple instances */ + if ((phyaddr >= 0) && (phyaddr <= 31)) + plat_dat->phy_addr = phyaddr; + + priv->phy_addr = plat_dat->phy_addr; + priv->phy_mask = plat_dat->phy_mask; + priv->phy_interface = plat_dat->interface; + priv->phy_reset = plat_dat->phy_reset; + + DBG(probe, DEBUG, "%s: exiting\n", __func__); + return 1; /* forces exit of driver_for_each_device() */ +} + +/** + * stmmac_dvr_probe + * @pdev: platform device pointer + * Description: the driver is initialized through platform_device. + */ +static int stmmac_dvr_probe(struct platform_device *pdev) +{ + int ret = 0; + struct resource *res; + unsigned int *addr = NULL; + struct net_device *ndev = NULL; + struct stmmac_priv *priv; + struct plat_stmmacenet_data *plat_dat; + + pr_info("STMMAC driver:\n\tplatform registration... "); + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + if (!res) { + ret = -ENODEV; + goto out; + } + pr_info("done!\n"); + + if (!request_mem_region(res->start, (res->end - res->start), + pdev->name)) { + pr_err("%s: ERROR: memory allocation failed" + "cannot get the I/O addr 0x%x\n", + __func__, (unsigned int)res->start); + ret = -EBUSY; + goto out; + } + + addr = ioremap(res->start, (res->end - res->start)); + if (!addr) { + pr_err("%s: ERROR: memory mapping failed \n", __func__); + ret = -ENOMEM; + goto out; + } + + ndev = alloc_etherdev(sizeof(struct stmmac_priv)); + if (!ndev) { + pr_err("%s: ERROR: allocating the device\n", __func__); + ret = -ENOMEM; + goto out; + } + + SET_NETDEV_DEV(ndev, &pdev->dev); + + /* Get the MAC information */ + ndev->irq = platform_get_irq_byname(pdev, "macirq"); + if (ndev->irq == -ENXIO) { + pr_err("%s: ERROR: MAC IRQ configuration " + "information not found\n", __func__); + ret = -ENODEV; + goto out; + } + + priv = netdev_priv(ndev); + priv->device = &(pdev->dev); + priv->dev = ndev; + plat_dat = (struct plat_stmmacenet_data *)((pdev->dev).platform_data); + priv->bus_id = plat_dat->bus_id; + priv->pbl = plat_dat->pbl; /* TLI */ + priv->is_gmac = plat_dat->has_gmac; /* GMAC is on board */ + + platform_set_drvdata(pdev, ndev); + + /* Set the I/O base addr */ + ndev->base_addr = (unsigned long)addr; + + /* MAC HW revice detection */ + ret = stmmac_mac_device_setup(ndev); + if (ret < 0) + goto out; + + /* Network Device Registration */ + ret = stmmac_probe(ndev); + if (ret < 0) + goto out; + + /* associate a PHY - it is provided by another platform bus */ + if (!driver_for_each_device + (&(stmmacphy_driver.driver), NULL, (void *)priv, + stmmac_associate_phy)) { + pr_err("No PHY device is associated with this MAC!\n"); + ret = -ENODEV; + goto out; + } + + priv->fix_mac_speed = plat_dat->fix_mac_speed; + priv->bsp_priv = plat_dat->bsp_priv; + + pr_info("\t%s - (dev. 
name: %s - id: %d, IRQ #%d\n"
+		"\tIO base addr: 0x%08x)\n", ndev->name, pdev->name,
+		pdev->id, ndev->irq, (unsigned int)addr);
+
+	/* MDIO bus Registration */
+	pr_debug("\tMDIO bus (id: %d)...", priv->bus_id);
+	ret = stmmac_mdio_register(ndev);
+	if (ret < 0)
+		goto out;
+	pr_debug("registered!\n");
+
+out:
+	if (ret < 0) {
+		platform_set_drvdata(pdev, NULL);
+		release_mem_region(res->start, (res->end - res->start));
+		if (addr != NULL)
+			iounmap(addr);
+	}
+
+	return ret;
+}
+
+/**
+ * stmmac_dvr_remove
+ * @pdev: platform device pointer
+ * Description: this function resets the TX/RX processes, disables the MAC
+ * RX/TX, changes the link status, releases the DMA descriptor rings,
+ * unregisters the MDIO bus and unmaps the allocated memory.
+ */
+static int stmmac_dvr_remove(struct platform_device *pdev)
+{
+	struct net_device *ndev = platform_get_drvdata(pdev);
+	struct resource *res;
+
+	pr_info("%s:\n\tremoving driver\n", __func__);
+
+	stmmac_dma_stop_rx(ndev->base_addr);
+	stmmac_dma_stop_tx(ndev->base_addr);
+
+	stmmac_mac_disable_rx(ndev->base_addr);
+	stmmac_mac_disable_tx(ndev->base_addr);
+
+	netif_carrier_off(ndev);
+
+	stmmac_mdio_unregister(ndev);
+
+	platform_set_drvdata(pdev, NULL);
+	unregister_netdev(ndev);
+
+	iounmap((void *)ndev->base_addr);
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	release_mem_region(res->start, (res->end - res->start));
+
+	free_netdev(ndev);
+
+	return 0;
+}
+
+#ifdef CONFIG_PM
+static int stmmac_suspend(struct platform_device *pdev, pm_message_t state)
+{
+	struct net_device *dev = platform_get_drvdata(pdev);
+	struct stmmac_priv *priv = netdev_priv(dev);
+	int dis_ic = 0;
+
+	if (!dev || !netif_running(dev))
+		return 0;
+
+	spin_lock(&priv->lock);
+
+	if (state.event == PM_EVENT_SUSPEND) {
+		netif_device_detach(dev);
+		netif_stop_queue(dev);
+		if (priv->phydev)
+			phy_stop(priv->phydev);
+
+#ifdef CONFIG_STMMAC_TIMER
+		priv->tm->timer_stop();
+		dis_ic = 1;
+#endif
+		napi_disable(&priv->napi);
+
+		/* Stop TX/RX DMA */
+		stmmac_dma_stop_tx(dev->base_addr);
+		stmmac_dma_stop_rx(dev->base_addr);
+		/* Clear the Rx/Tx descriptors */
+		priv->mac_type->ops->init_rx_desc(priv->dma_rx,
+						  priv->dma_rx_size, dis_ic);
+		priv->mac_type->ops->init_tx_desc(priv->dma_tx,
+						  priv->dma_tx_size);
+
+		stmmac_mac_disable_tx(dev->base_addr);
+
+		if (device_may_wakeup(&(pdev->dev))) {
+			/* Enable Power down mode by programming the PMT regs */
+			if (priv->wolenabled == PMT_SUPPORTED)
+				priv->mac_type->ops->pmt(dev->base_addr,
+							 priv->wolopts);
+		} else {
+			stmmac_mac_disable_rx(dev->base_addr);
+		}
+	} else {
+		priv->shutdown = 1;
+		/* Although this can appear slightly redundant, it actually
+		 * speeds up the standby operation and guarantees that the
+		 * driver keeps working if hibernation is performed on media. */
+		stmmac_release(dev);
+	}
+
+	spin_unlock(&priv->lock);
+	return 0;
+}
+
+static int stmmac_resume(struct platform_device *pdev)
+{
+	struct net_device *dev = platform_get_drvdata(pdev);
+	struct stmmac_priv *priv = netdev_priv(dev);
+	unsigned long ioaddr = dev->base_addr;
+
+	if (!netif_running(dev))
+		return 0;
+
+	spin_lock(&priv->lock);
+
+	if (priv->shutdown) {
+		/* Re-open the interface and re-init the MAC/DMA
+		   and the rings. */
+		stmmac_open(dev);
+		goto out_resume;
+	}
+
+	/* The Power Down bit in the PM register is cleared
+	 * automatically as soon as a magic packet or a Wake-up frame
+	 * is received. Anyway, it's better to clear this bit manually
+	 * because it can cause problems while resuming
+	 * from other devices (e.g. a serial console).
*/ + if (device_may_wakeup(&(pdev->dev))) + if (priv->wolenabled == PMT_SUPPORTED) + priv->mac_type->ops->pmt(dev->base_addr, 0); + + netif_device_attach(dev); + + /* Enable the MAC and DMA */ + stmmac_mac_enable_rx(ioaddr); + stmmac_mac_enable_tx(ioaddr); + stmmac_dma_start_tx(ioaddr); + stmmac_dma_start_rx(ioaddr); + +#ifdef CONFIG_STMMAC_TIMER + priv->tm->timer_start(tmrate); +#endif + napi_enable(&priv->napi); + + if (priv->phydev) + phy_start(priv->phydev); + + netif_start_queue(dev); + +out_resume: + spin_unlock(&priv->lock); + return 0; +} +#endif + +static struct platform_driver stmmac_driver = { + .driver = { + .name = STMMAC_RESOURCE_NAME, + }, + .probe = stmmac_dvr_probe, + .remove = stmmac_dvr_remove, +#ifdef CONFIG_PM + .suspend = stmmac_suspend, + .resume = stmmac_resume, +#endif + +}; + +/** + * stmmac_init_module - Entry point for the driver + * Description: This function is the entry point for the driver. + */ +static int __init stmmac_init_module(void) +{ + int ret; + + if (platform_driver_register(&stmmacphy_driver)) { + pr_err("No PHY devices registered!\n"); + return -ENODEV; + } + + ret = platform_driver_register(&stmmac_driver); + return ret; +} + +/** + * stmmac_cleanup_module - Cleanup routine for the driver + * Description: This function is the cleanup routine for the driver. + */ +static void __exit stmmac_cleanup_module(void) +{ + platform_driver_unregister(&stmmacphy_driver); + platform_driver_unregister(&stmmac_driver); +} + +#ifndef MODULE +static int __init stmmac_cmdline_opt(char *str) +{ + char *opt; + + if (!str || !*str) + return -EINVAL; + while ((opt = strsep(&str, ",")) != NULL) { + if (!strncmp(opt, "debug:", 6)) + strict_strtoul(opt + 6, 0, (unsigned long *)&debug); + else if (!strncmp(opt, "phyaddr:", 8)) + strict_strtoul(opt + 8, 0, (unsigned long *)&phyaddr); + else if (!strncmp(opt, "dma_txsize:", 11)) + strict_strtoul(opt + 11, 0, + (unsigned long *)&dma_txsize); + else if (!strncmp(opt, "dma_rxsize:", 11)) + strict_strtoul(opt + 11, 0, + (unsigned long *)&dma_rxsize); + else if (!strncmp(opt, "buf_sz:", 7)) + strict_strtoul(opt + 7, 0, (unsigned long *)&buf_sz); + else if (!strncmp(opt, "tc:", 3)) + strict_strtoul(opt + 3, 0, (unsigned long *)&tc); + else if (!strncmp(opt, "tx_coe:", 7)) + strict_strtoul(opt + 7, 0, (unsigned long *)&tx_coe); + else if (!strncmp(opt, "watchdog:", 9)) + strict_strtoul(opt + 9, 0, (unsigned long *)&watchdog); + else if (!strncmp(opt, "flow_ctrl:", 10)) + strict_strtoul(opt + 10, 0, + (unsigned long *)&flow_ctrl); + else if (!strncmp(opt, "pause:", 6)) + strict_strtoul(opt + 6, 0, (unsigned long *)&pause); +#ifdef CONFIG_STMMAC_TIMER + else if (!strncmp(opt, "tmrate:", 7)) + strict_strtoul(opt + 7, 0, (unsigned long *)&tmrate); +#endif + } + return 0; +} + +__setup("stmmaceth=", stmmac_cmdline_opt); +#endif + +module_init(stmmac_init_module); +module_exit(stmmac_cleanup_module); + +MODULE_DESCRIPTION("STMMAC 10/100/1000 Ethernet driver"); +MODULE_AUTHOR("Giuseppe Cavallaro <peppe.cavallaro@st.com>"); +MODULE_LICENSE("GPL"); diff --git a/drivers/net/stmmac/stmmac_mdio.c b/drivers/net/stmmac/stmmac_mdio.c new file mode 100644 index 00000000000..8498552a22f --- /dev/null +++ b/drivers/net/stmmac/stmmac_mdio.c @@ -0,0 +1,217 @@ +/******************************************************************************* + STMMAC Ethernet Driver -- MDIO bus implementation + Provides Bus interface for MII registers + + Copyright (C) 2007-2009 STMicroelectronics Ltd + + This program is free software; you can redistribute it and/or 
modify it + under the terms and conditions of the GNU General Public License, + version 2, as published by the Free Software Foundation. + + This program is distributed in the hope it will be useful, but WITHOUT + ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + more details. + + You should have received a copy of the GNU General Public License along with + this program; if not, write to the Free Software Foundation, Inc., + 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + + The full GNU General Public License is included in this distribution in + the file called "COPYING". + + Author: Carl Shaw <carl.shaw@st.com> + Maintainer: Giuseppe Cavallaro <peppe.cavallaro@st.com> +*******************************************************************************/ + +#include <linux/netdevice.h> +#include <linux/mii.h> +#include <linux/phy.h> + +#include "stmmac.h" + +#define MII_BUSY 0x00000001 +#define MII_WRITE 0x00000002 + +/** + * stmmac_mdio_read + * @bus: points to the mii_bus structure + * @phyaddr: MII addr reg bits 15-11 + * @phyreg: MII addr reg bits 10-6 + * Description: it reads data from the MII register from within the phy device. + * For the 7111 GMAC, we must set the bit 0 in the MII address register while + * accessing the PHY registers. + * Fortunately, it seems this has no drawback for the 7109 MAC. + */ +static int stmmac_mdio_read(struct mii_bus *bus, int phyaddr, int phyreg) +{ + struct net_device *ndev = bus->priv; + struct stmmac_priv *priv = netdev_priv(ndev); + unsigned long ioaddr = ndev->base_addr; + unsigned int mii_address = priv->mac_type->hw.mii.addr; + unsigned int mii_data = priv->mac_type->hw.mii.data; + + int data; + u16 regValue = (((phyaddr << 11) & (0x0000F800)) | + ((phyreg << 6) & (0x000007C0))); + regValue |= MII_BUSY; /* in case of GMAC */ + + do {} while (((readl(ioaddr + mii_address)) & MII_BUSY) == 1); + writel(regValue, ioaddr + mii_address); + do {} while (((readl(ioaddr + mii_address)) & MII_BUSY) == 1); + + /* Read the data from the MII data register */ + data = (int)readl(ioaddr + mii_data); + + return data; +} + +/** + * stmmac_mdio_write + * @bus: points to the mii_bus structure + * @phyaddr: MII addr reg bits 15-11 + * @phyreg: MII addr reg bits 10-6 + * @phydata: phy data + * Description: it writes the data into the MII register from within the device. 
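Both accessors share one address-register layout: the PHY address goes into bits 15-11, the register number into bits 10-6, and BUSY/WRITE occupy the two low bits. A standalone sketch of that packing, mirroring the masks used above (the example PHY/register numbers are illustrative only):

#include <stdio.h>

#define MII_BUSY  0x00000001
#define MII_WRITE 0x00000002

/* Pack a PHY address and register number exactly as the driver does:
 * address in bits 15-11, register in bits 10-6. */
static unsigned short mii_addr_reg(int phyaddr, int phyreg, int write)
{
	unsigned short v = ((phyaddr << 11) & 0x0000F800) |
			   ((phyreg << 6) & 0x000007C0);

	if (write)
		v |= MII_WRITE;
	return v | MII_BUSY;	/* BUSY is always set to start the cycle */
}

int main(void)
{
	/* e.g. PHY 1, register 4, write cycle */
	printf("addr reg = 0x%04x\n", mii_addr_reg(1, 4, 1));
	return 0;
}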
+ */ +static int stmmac_mdio_write(struct mii_bus *bus, int phyaddr, int phyreg, + u16 phydata) +{ + struct net_device *ndev = bus->priv; + struct stmmac_priv *priv = netdev_priv(ndev); + unsigned long ioaddr = ndev->base_addr; + unsigned int mii_address = priv->mac_type->hw.mii.addr; + unsigned int mii_data = priv->mac_type->hw.mii.data; + + u16 value = + (((phyaddr << 11) & (0x0000F800)) | ((phyreg << 6) & (0x000007C0))) + | MII_WRITE; + + value |= MII_BUSY; + + /* Wait until any existing MII operation is complete */ + do {} while (((readl(ioaddr + mii_address)) & MII_BUSY) == 1); + + /* Set the MII address register to write */ + writel(phydata, ioaddr + mii_data); + writel(value, ioaddr + mii_address); + + /* Wait until any existing MII operation is complete */ + do {} while (((readl(ioaddr + mii_address)) & MII_BUSY) == 1); + + return 0; +} + +/** + * stmmac_mdio_reset + * @bus: points to the mii_bus structure + * Description: reset the MII bus + */ +static int stmmac_mdio_reset(struct mii_bus *bus) +{ + struct net_device *ndev = bus->priv; + struct stmmac_priv *priv = netdev_priv(ndev); + unsigned long ioaddr = ndev->base_addr; + unsigned int mii_address = priv->mac_type->hw.mii.addr; + + if (priv->phy_reset) { + pr_debug("stmmac_mdio_reset: calling phy_reset\n"); + priv->phy_reset(priv->bsp_priv); + } + + /* This is a workaround for problems with the STE101P PHY. + * It doesn't complete its reset until at least one clock cycle + * on MDC, so perform a dummy mdio read. + */ + writel(0, ioaddr + mii_address); + + return 0; +} + +/** + * stmmac_mdio_register + * @ndev: net device structure + * Description: it registers the MII bus + */ +int stmmac_mdio_register(struct net_device *ndev) +{ + int err = 0; + struct mii_bus *new_bus; + int *irqlist; + struct stmmac_priv *priv = netdev_priv(ndev); + int addr, found; + + new_bus = mdiobus_alloc(); + if (new_bus == NULL) + return -ENOMEM; + + irqlist = kzalloc(sizeof(int) * PHY_MAX_ADDR, GFP_KERNEL); + if (irqlist == NULL) { + err = -ENOMEM; + goto irqlist_alloc_fail; + } + + /* Assign IRQ to phy at address phy_addr */ + if (priv->phy_addr != -1) + irqlist[priv->phy_addr] = priv->phy_irq; + + new_bus->name = "STMMAC MII Bus"; + new_bus->read = &stmmac_mdio_read; + new_bus->write = &stmmac_mdio_write; + new_bus->reset = &stmmac_mdio_reset; + snprintf(new_bus->id, MII_BUS_ID_SIZE, "%x", priv->bus_id); + new_bus->priv = ndev; + new_bus->irq = irqlist; + new_bus->phy_mask = priv->phy_mask; + new_bus->parent = priv->device; + err = mdiobus_register(new_bus); + if (err != 0) { + pr_err("%s: Cannot register as MDIO bus\n", new_bus->name); + goto bus_register_fail; + } + + priv->mii = new_bus; + + found = 0; + for (addr = 0; addr < 32; addr++) { + struct phy_device *phydev = new_bus->phy_map[addr]; + if (phydev) { + if (priv->phy_addr == -1) { + priv->phy_addr = addr; + phydev->irq = priv->phy_irq; + irqlist[addr] = priv->phy_irq; + } + pr_info("%s: PHY ID %08x at %d IRQ %d (%s)%s\n", + ndev->name, phydev->phy_id, addr, + phydev->irq, dev_name(&phydev->dev), + (addr == priv->phy_addr) ? 
" active" : ""); + found = 1; + } + } + + if (!found) + pr_warning("%s: No PHY found\n", ndev->name); + + return 0; +bus_register_fail: + kfree(irqlist); +irqlist_alloc_fail: + kfree(new_bus); + return err; +} + +/** + * stmmac_mdio_unregister + * @ndev: net device structure + * Description: it unregisters the MII bus + */ +int stmmac_mdio_unregister(struct net_device *ndev) +{ + struct stmmac_priv *priv = netdev_priv(ndev); + + mdiobus_unregister(priv->mii); + priv->mii->priv = NULL; + kfree(priv->mii); + + return 0; +} diff --git a/drivers/net/stmmac/stmmac_timer.c b/drivers/net/stmmac/stmmac_timer.c new file mode 100644 index 00000000000..b838c658207 --- /dev/null +++ b/drivers/net/stmmac/stmmac_timer.c @@ -0,0 +1,140 @@ +/******************************************************************************* + STMMAC external timer support. + + Copyright (C) 2007-2009 STMicroelectronics Ltd + + This program is free software; you can redistribute it and/or modify it + under the terms and conditions of the GNU General Public License, + version 2, as published by the Free Software Foundation. + + This program is distributed in the hope it will be useful, but WITHOUT + ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + more details. + + You should have received a copy of the GNU General Public License along with + this program; if not, write to the Free Software Foundation, Inc., + 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + + The full GNU General Public License is included in this distribution in + the file called "COPYING". + + Author: Giuseppe Cavallaro <peppe.cavallaro@st.com> +*******************************************************************************/ + +#include <linux/kernel.h> +#include <linux/etherdevice.h> +#include "stmmac_timer.h" + +static void stmmac_timer_handler(void *data) +{ + struct net_device *dev = (struct net_device *)data; + + stmmac_schedule(dev); + + return; +} + +#define STMMAC_TIMER_MSG(timer, freq) \ +printk(KERN_INFO "stmmac_timer: %s Timer ON (freq %dHz)\n", timer, freq); + +#if defined(CONFIG_STMMAC_RTC_TIMER) +#include <linux/rtc.h> +static struct rtc_device *stmmac_rtc; +static rtc_task_t stmmac_task; + +static void stmmac_rtc_start(unsigned int new_freq) +{ + rtc_irq_set_freq(stmmac_rtc, &stmmac_task, new_freq); + rtc_irq_set_state(stmmac_rtc, &stmmac_task, 1); + return; +} + +static void stmmac_rtc_stop(void) +{ + rtc_irq_set_state(stmmac_rtc, &stmmac_task, 0); + return; +} + +int stmmac_open_ext_timer(struct net_device *dev, struct stmmac_timer *tm) +{ + stmmac_task.private_data = dev; + stmmac_task.func = stmmac_timer_handler; + + stmmac_rtc = rtc_class_open(CONFIG_RTC_HCTOSYS_DEVICE); + if (stmmac_rtc == NULL) { + pr_error("open rtc device failed\n"); + return -ENODEV; + } + + rtc_irq_register(stmmac_rtc, &stmmac_task); + + /* Periodic mode is not supported */ + if ((rtc_irq_set_freq(stmmac_rtc, &stmmac_task, tm->freq) < 0)) { + pr_error("set periodic failed\n"); + rtc_irq_unregister(stmmac_rtc, &stmmac_task); + rtc_class_close(stmmac_rtc); + return -1; + } + + STMMAC_TIMER_MSG(CONFIG_RTC_HCTOSYS_DEVICE, tm->freq); + + tm->timer_start = stmmac_rtc_start; + tm->timer_stop = stmmac_rtc_stop; + + return 0; +} + +int stmmac_close_ext_timer(void) +{ + rtc_irq_set_state(stmmac_rtc, &stmmac_task, 0); + rtc_irq_unregister(stmmac_rtc, &stmmac_task); + rtc_class_close(stmmac_rtc); + return 0; +} + +#elif defined(CONFIG_STMMAC_TMU_TIMER) +#include 
<linux/clk.h> +#define TMU_CHANNEL "tmu2_clk" +static struct clk *timer_clock; + +static void stmmac_tmu_start(unsigned int new_freq) +{ + clk_set_rate(timer_clock, new_freq); + clk_enable(timer_clock); + return; +} + +static void stmmac_tmu_stop(void) +{ + clk_disable(timer_clock); + return; +} + +int stmmac_open_ext_timer(struct net_device *dev, struct stmmac_timer *tm) +{ + timer_clock = clk_get(NULL, TMU_CHANNEL); + + if (timer_clock == NULL) + return -1; + + if (tmu2_register_user(stmmac_timer_handler, (void *)dev) < 0) { + timer_clock = NULL; + return -1; + } + + STMMAC_TIMER_MSG("TMU2", tm->freq); + tm->timer_start = stmmac_tmu_start; + tm->timer_stop = stmmac_tmu_stop; + + return 0; +} + +int stmmac_close_ext_timer(void) +{ + clk_disable(timer_clock); + tmu2_unregister_user(); + clk_put(timer_clock); + return 0; +} +#endif diff --git a/drivers/net/stmmac/stmmac_timer.h b/drivers/net/stmmac/stmmac_timer.h new file mode 100644 index 00000000000..f795cae3372 --- /dev/null +++ b/drivers/net/stmmac/stmmac_timer.h @@ -0,0 +1,41 @@ +/******************************************************************************* + STMMAC external timer Header File. + + Copyright (C) 2007-2009 STMicroelectronics Ltd + + This program is free software; you can redistribute it and/or modify it + under the terms and conditions of the GNU General Public License, + version 2, as published by the Free Software Foundation. + + This program is distributed in the hope it will be useful, but WITHOUT + ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + more details. + + You should have received a copy of the GNU General Public License along with + this program; if not, write to the Free Software Foundation, Inc., + 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + + The full GNU General Public License is included in this distribution in + the file called "COPYING". 
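The interface declared in this header is deliberately small: whichever backend is compiled in (RTC or TMU), the MAC core only ever sees two callbacks and a frequency. A userspace model of that contract (hypothetical demo functions; the 256 Hz rate is only illustrative):

#include <stdio.h>

/* Model of the stmmac_timer contract declared below: the core only
 * needs start/stop callbacks and a frequency. */
struct timer_ops {
	void (*timer_start)(unsigned int new_freq);
	void (*timer_stop)(void);
	unsigned int freq;
};

static void demo_start(unsigned int new_freq)
{
	printf("timer armed at %u Hz\n", new_freq);
}

static void demo_stop(void)
{
	printf("timer stopped\n");
}

int main(void)
{
	struct timer_ops tm = { demo_start, demo_stop, 256 /* assumed rate */ };

	tm.timer_start(tm.freq);	/* as the driver does after open */
	tm.timer_stop();		/* as on suspend/close */
	return 0;
}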
+ + Author: Giuseppe Cavallaro <peppe.cavallaro@st.com> +*******************************************************************************/ + +struct stmmac_timer { + void (*timer_start) (unsigned int new_freq); + void (*timer_stop) (void); + unsigned int freq; +}; + +/* Open the HW timer device and return 0 in case of success */ +int stmmac_open_ext_timer(struct net_device *dev, struct stmmac_timer *tm); +/* Stop the timer and release it */ +int stmmac_close_ext_timer(void); +/* Function used for scheduling task within the stmmac */ +void stmmac_schedule(struct net_device *dev); + +#if defined(CONFIG_STMMAC_TMU_TIMER) +extern int tmu2_register_user(void *fnt, void *data); +extern void tmu2_unregister_user(void); +#endif diff --git a/drivers/net/usb/pegasus.c b/drivers/net/usb/pegasus.c index 6fdaba8674b..ed4a508ef26 100644 --- a/drivers/net/usb/pegasus.c +++ b/drivers/net/usb/pegasus.c @@ -62,8 +62,11 @@ static char *devid=NULL; static struct usb_eth_dev usb_dev_id[] = { #define PEGASUS_DEV(pn, vid, pid, flags) \ {.name = pn, .vendor = vid, .device = pid, .private = flags}, +#define PEGASUS_DEV_CLASS(pn, vid, pid, dclass, flags) \ + PEGASUS_DEV(pn, vid, pid, flags) #include "pegasus.h" #undef PEGASUS_DEV +#undef PEGASUS_DEV_CLASS {NULL, 0, 0, 0}, {NULL, 0, 0, 0} }; @@ -71,8 +74,18 @@ static struct usb_eth_dev usb_dev_id[] = { static struct usb_device_id pegasus_ids[] = { #define PEGASUS_DEV(pn, vid, pid, flags) \ {.match_flags = USB_DEVICE_ID_MATCH_DEVICE, .idVendor = vid, .idProduct = pid}, +/* + * The Belkin F8T012xx1 bluetooth adaptor has the same vendor and product + * IDs as the Belkin F5D5050, so we need to teach the pegasus driver to + * ignore adaptors belonging to the "Wireless" class 0xE0. For this one + * case anyway, seeing as the pegasus is for "Wired" adaptors. + */ +#define PEGASUS_DEV_CLASS(pn, vid, pid, dclass, flags) \ + {.match_flags = (USB_DEVICE_ID_MATCH_DEVICE | USB_DEVICE_ID_MATCH_DEV_CLASS), \ + .idVendor = vid, .idProduct = pid, .bDeviceClass = dclass}, #include "pegasus.h" #undef PEGASUS_DEV +#undef PEGASUS_DEV_CLASS {}, {} }; diff --git a/drivers/net/usb/pegasus.h b/drivers/net/usb/pegasus.h index f968c834ff6..5d02f020073 100644 --- a/drivers/net/usb/pegasus.h +++ b/drivers/net/usb/pegasus.h @@ -202,7 +202,11 @@ PEGASUS_DEV( "AEI USB Fast Ethernet Adapter", VENDOR_AEILAB, 0x1701, DEFAULT_GPIO_RESET | PEGASUS_II ) PEGASUS_DEV( "Allied Telesyn Int. AT-USB100", VENDOR_ALLIEDTEL, 0xb100, DEFAULT_GPIO_RESET | PEGASUS_II ) -PEGASUS_DEV( "Belkin F5D5050 USB Ethernet", VENDOR_BELKIN, 0x0121, +/* + * Distinguish between this Belkin adaptor and the Belkin bluetooth adaptors + * with the same product IDs by checking the device class too. + */ +PEGASUS_DEV_CLASS( "Belkin F5D5050 USB Ethernet", VENDOR_BELKIN, 0x0121, 0x00, DEFAULT_GPIO_RESET | PEGASUS_II ) PEGASUS_DEV( "Billionton USB-100", VENDOR_BILLIONTON, 0x0986, DEFAULT_GPIO_RESET ) diff --git a/drivers/net/vmxnet3/Makefile b/drivers/net/vmxnet3/Makefile new file mode 100644 index 00000000000..880f5098eac --- /dev/null +++ b/drivers/net/vmxnet3/Makefile @@ -0,0 +1,35 @@ +################################################################################ +# +# Linux driver for VMware's vmxnet3 ethernet NIC. +# +# Copyright (C) 2007-2009, VMware, Inc. All Rights Reserved. +# +# This program is free software; you can redistribute it and/or modify it +# under the terms of the GNU General Public License as published by the +# Free Software Foundation; version 2 of the License and no later version. 
+# +# This program is distributed in the hope that it will be useful, but +# WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or +# NON INFRINGEMENT. See the GNU General Public License for more +# details. +# +# You should have received a copy of the GNU General Public License +# along with this program; if not, write to the Free Software +# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. +# +# The full GNU General Public License is included in this distribution in +# the file called "COPYING". +# +# Maintained by: Shreyas Bhatewara <pv-drivers@vmware.com> +# +# +################################################################################ + +# +# Makefile for the VMware vmxnet3 ethernet NIC driver +# + +obj-$(CONFIG_VMXNET3) += vmxnet3.o + +vmxnet3-objs := vmxnet3_drv.o vmxnet3_ethtool.o diff --git a/drivers/net/vmxnet3/upt1_defs.h b/drivers/net/vmxnet3/upt1_defs.h new file mode 100644 index 00000000000..37108fb226d --- /dev/null +++ b/drivers/net/vmxnet3/upt1_defs.h @@ -0,0 +1,96 @@ +/* + * Linux driver for VMware's vmxnet3 ethernet NIC. + * + * Copyright (C) 2008-2009, VMware, Inc. All Rights Reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; version 2 of the License and no later version. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or + * NON INFRINGEMENT. See the GNU General Public License for more + * details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * The full GNU General Public License is included in this distribution in + * the file called "COPYING". 
+ * + * Maintained by: Shreyas Bhatewara <pv-drivers@vmware.com> + * + */ + +#ifndef _UPT1_DEFS_H +#define _UPT1_DEFS_H + +struct UPT1_TxStats { + u64 TSOPktsTxOK; /* TSO pkts post-segmentation */ + u64 TSOBytesTxOK; + u64 ucastPktsTxOK; + u64 ucastBytesTxOK; + u64 mcastPktsTxOK; + u64 mcastBytesTxOK; + u64 bcastPktsTxOK; + u64 bcastBytesTxOK; + u64 pktsTxError; + u64 pktsTxDiscard; +}; + +struct UPT1_RxStats { + u64 LROPktsRxOK; /* LRO pkts */ + u64 LROBytesRxOK; /* bytes from LRO pkts */ + /* the following counters are for pkts from the wire, i.e., pre-LRO */ + u64 ucastPktsRxOK; + u64 ucastBytesRxOK; + u64 mcastPktsRxOK; + u64 mcastBytesRxOK; + u64 bcastPktsRxOK; + u64 bcastBytesRxOK; + u64 pktsRxOutOfBuf; + u64 pktsRxError; +}; + +/* interrupt moderation level */ +enum { + UPT1_IML_NONE = 0, /* no interrupt moderation */ + UPT1_IML_HIGHEST = 7, /* least intr generated */ + UPT1_IML_ADAPTIVE = 8, /* adpative intr moderation */ +}; +/* values for UPT1_RSSConf.hashFunc */ +enum { + UPT1_RSS_HASH_TYPE_NONE = 0x0, + UPT1_RSS_HASH_TYPE_IPV4 = 0x01, + UPT1_RSS_HASH_TYPE_TCP_IPV4 = 0x02, + UPT1_RSS_HASH_TYPE_IPV6 = 0x04, + UPT1_RSS_HASH_TYPE_TCP_IPV6 = 0x08, +}; + +enum { + UPT1_RSS_HASH_FUNC_NONE = 0x0, + UPT1_RSS_HASH_FUNC_TOEPLITZ = 0x01, +}; + +#define UPT1_RSS_MAX_KEY_SIZE 40 +#define UPT1_RSS_MAX_IND_TABLE_SIZE 128 + +struct UPT1_RSSConf { + u16 hashType; + u16 hashFunc; + u16 hashKeySize; + u16 indTableSize; + u8 hashKey[UPT1_RSS_MAX_KEY_SIZE]; + u8 indTable[UPT1_RSS_MAX_IND_TABLE_SIZE]; +}; + +/* features */ +enum { + UPT1_F_RXCSUM = 0x0001, /* rx csum verification */ + UPT1_F_RSS = 0x0002, + UPT1_F_RXVLAN = 0x0004, /* VLAN tag stripping */ + UPT1_F_LRO = 0x0008, +}; +#endif diff --git a/drivers/net/vmxnet3/vmxnet3_defs.h b/drivers/net/vmxnet3/vmxnet3_defs.h new file mode 100644 index 00000000000..dc8ee4438a4 --- /dev/null +++ b/drivers/net/vmxnet3/vmxnet3_defs.h @@ -0,0 +1,535 @@ +/* + * Linux driver for VMware's vmxnet3 ethernet NIC. + * + * Copyright (C) 2008-2009, VMware, Inc. All Rights Reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; version 2 of the License and no later version. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or + * NON INFRINGEMENT. See the GNU General Public License for more + * details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * The full GNU General Public License is included in this distribution in + * the file called "COPYING". 
+ * + * Maintained by: Shreyas Bhatewara <pv-drivers@vmware.com> + * + */ + +#ifndef _VMXNET3_DEFS_H_ +#define _VMXNET3_DEFS_H_ + +#include "upt1_defs.h" + +/* all registers are 32 bit wide */ +/* BAR 1 */ +enum { + VMXNET3_REG_VRRS = 0x0, /* Vmxnet3 Revision Report Selection */ + VMXNET3_REG_UVRS = 0x8, /* UPT Version Report Selection */ + VMXNET3_REG_DSAL = 0x10, /* Driver Shared Address Low */ + VMXNET3_REG_DSAH = 0x18, /* Driver Shared Address High */ + VMXNET3_REG_CMD = 0x20, /* Command */ + VMXNET3_REG_MACL = 0x28, /* MAC Address Low */ + VMXNET3_REG_MACH = 0x30, /* MAC Address High */ + VMXNET3_REG_ICR = 0x38, /* Interrupt Cause Register */ + VMXNET3_REG_ECR = 0x40 /* Event Cause Register */ +}; + +/* BAR 0 */ +enum { + VMXNET3_REG_IMR = 0x0, /* Interrupt Mask Register */ + VMXNET3_REG_TXPROD = 0x600, /* Tx Producer Index */ + VMXNET3_REG_RXPROD = 0x800, /* Rx Producer Index for ring 1 */ + VMXNET3_REG_RXPROD2 = 0xA00 /* Rx Producer Index for ring 2 */ +}; + +#define VMXNET3_PT_REG_SIZE 4096 /* BAR 0 */ +#define VMXNET3_VD_REG_SIZE 4096 /* BAR 1 */ + +#define VMXNET3_REG_ALIGN 8 /* All registers are 8-byte aligned. */ +#define VMXNET3_REG_ALIGN_MASK 0x7 + +/* I/O Mapped access to registers */ +#define VMXNET3_IO_TYPE_PT 0 +#define VMXNET3_IO_TYPE_VD 1 +#define VMXNET3_IO_ADDR(type, reg) (((type) << 24) | ((reg) & 0xFFFFFF)) +#define VMXNET3_IO_TYPE(addr) ((addr) >> 24) +#define VMXNET3_IO_REG(addr) ((addr) & 0xFFFFFF) + +enum { + VMXNET3_CMD_FIRST_SET = 0xCAFE0000, + VMXNET3_CMD_ACTIVATE_DEV = VMXNET3_CMD_FIRST_SET, + VMXNET3_CMD_QUIESCE_DEV, + VMXNET3_CMD_RESET_DEV, + VMXNET3_CMD_UPDATE_RX_MODE, + VMXNET3_CMD_UPDATE_MAC_FILTERS, + VMXNET3_CMD_UPDATE_VLAN_FILTERS, + VMXNET3_CMD_UPDATE_RSSIDT, + VMXNET3_CMD_UPDATE_IML, + VMXNET3_CMD_UPDATE_PMCFG, + VMXNET3_CMD_UPDATE_FEATURE, + VMXNET3_CMD_LOAD_PLUGIN, + + VMXNET3_CMD_FIRST_GET = 0xF00D0000, + VMXNET3_CMD_GET_QUEUE_STATUS = VMXNET3_CMD_FIRST_GET, + VMXNET3_CMD_GET_STATS, + VMXNET3_CMD_GET_LINK, + VMXNET3_CMD_GET_PERM_MAC_LO, + VMXNET3_CMD_GET_PERM_MAC_HI, + VMXNET3_CMD_GET_DID_LO, + VMXNET3_CMD_GET_DID_HI, + VMXNET3_CMD_GET_DEV_EXTRA_INFO, + VMXNET3_CMD_GET_CONF_INTR +}; + +struct Vmxnet3_TxDesc { + u64 addr; + + u32 len:14; + u32 gen:1; /* generation bit */ + u32 rsvd:1; + u32 dtype:1; /* descriptor type */ + u32 ext1:1; + u32 msscof:14; /* MSS, checksum offset, flags */ + + u32 hlen:10; /* header len */ + u32 om:2; /* offload mode */ + u32 eop:1; /* End Of Packet */ + u32 cq:1; /* completion request */ + u32 ext2:1; + u32 ti:1; /* VLAN Tag Insertion */ + u32 tci:16; /* Tag to Insert */ +}; + +/* TxDesc.OM values */ +#define VMXNET3_OM_NONE 0 +#define VMXNET3_OM_CSUM 2 +#define VMXNET3_OM_TSO 3 + +/* fields in TxDesc we access w/o using bit fields */ +#define VMXNET3_TXD_EOP_SHIFT 12 +#define VMXNET3_TXD_CQ_SHIFT 13 +#define VMXNET3_TXD_GEN_SHIFT 14 + +#define VMXNET3_TXD_CQ (1 << VMXNET3_TXD_CQ_SHIFT) +#define VMXNET3_TXD_EOP (1 << VMXNET3_TXD_EOP_SHIFT) +#define VMXNET3_TXD_GEN (1 << VMXNET3_TXD_GEN_SHIFT) + +#define VMXNET3_HDR_COPY_SIZE 128 + + +struct Vmxnet3_TxDataDesc { + u8 data[VMXNET3_HDR_COPY_SIZE]; +}; + + +struct Vmxnet3_TxCompDesc { + u32 txdIdx:12; /* Index of the EOP TxDesc */ + u32 ext1:20; + + u32 ext2; + u32 ext3; + + u32 rsvd:24; + u32 type:7; /* completion type */ + u32 gen:1; /* generation bit */ +}; + + +struct Vmxnet3_RxDesc { + u64 addr; + + u32 len:14; + u32 btype:1; /* Buffer Type */ + u32 dtype:1; /* Descriptor type */ + u32 rsvd:15; + u32 gen:1; /* Generation bit */ + + u32 ext1; +}; + +/* values of 
RXD.BTYPE */ +#define VMXNET3_RXD_BTYPE_HEAD 0 /* head only */ +#define VMXNET3_RXD_BTYPE_BODY 1 /* body only */ + +/* fields in RxDesc we access w/o using bit fields */ +#define VMXNET3_RXD_BTYPE_SHIFT 14 +#define VMXNET3_RXD_GEN_SHIFT 31 + + +struct Vmxnet3_RxCompDesc { + u32 rxdIdx:12; /* Index of the RxDesc */ + u32 ext1:2; + u32 eop:1; /* End of Packet */ + u32 sop:1; /* Start of Packet */ + u32 rqID:10; /* rx queue/ring ID */ + u32 rssType:4; /* RSS hash type used */ + u32 cnc:1; /* Checksum Not Calculated */ + u32 ext2:1; + + u32 rssHash; /* RSS hash value */ + + u32 len:14; /* data length */ + u32 err:1; /* Error */ + u32 ts:1; /* Tag is stripped */ + u32 tci:16; /* Tag stripped */ + + u32 csum:16; + u32 tuc:1; /* TCP/UDP Checksum Correct */ + u32 udp:1; /* UDP packet */ + u32 tcp:1; /* TCP packet */ + u32 ipc:1; /* IP Checksum Correct */ + u32 v6:1; /* IPv6 */ + u32 v4:1; /* IPv4 */ + u32 frg:1; /* IP Fragment */ + u32 fcs:1; /* Frame CRC correct */ + u32 type:7; /* completion type */ + u32 gen:1; /* generation bit */ +}; + +/* fields in RxCompDesc we access via Vmxnet3_GenericDesc.dword[3] */ +#define VMXNET3_RCD_TUC_SHIFT 16 +#define VMXNET3_RCD_IPC_SHIFT 19 + +/* fields in RxCompDesc we access via Vmxnet3_GenericDesc.qword[1] */ +#define VMXNET3_RCD_TYPE_SHIFT 56 +#define VMXNET3_RCD_GEN_SHIFT 63 + +/* csum OK for TCP/UDP pkts over IP */ +#define VMXNET3_RCD_CSUM_OK (1 << VMXNET3_RCD_TUC_SHIFT | \ + 1 << VMXNET3_RCD_IPC_SHIFT) + +/* value of RxCompDesc.rssType */ +enum { + VMXNET3_RCD_RSS_TYPE_NONE = 0, + VMXNET3_RCD_RSS_TYPE_IPV4 = 1, + VMXNET3_RCD_RSS_TYPE_TCPIPV4 = 2, + VMXNET3_RCD_RSS_TYPE_IPV6 = 3, + VMXNET3_RCD_RSS_TYPE_TCPIPV6 = 4, +}; + + +/* a union for accessing all cmd/completion descriptors */ +union Vmxnet3_GenericDesc { + u64 qword[2]; + u32 dword[4]; + u16 word[8]; + struct Vmxnet3_TxDesc txd; + struct Vmxnet3_RxDesc rxd; + struct Vmxnet3_TxCompDesc tcd; + struct Vmxnet3_RxCompDesc rcd; +}; + +#define VMXNET3_INIT_GEN 1 + +/* Max size of a single tx buffer */ +#define VMXNET3_MAX_TX_BUF_SIZE (1 << 14) + +/* # of tx desc needed for a tx buffer size */ +#define VMXNET3_TXD_NEEDED(size) (((size) + VMXNET3_MAX_TX_BUF_SIZE - 1) / \ + VMXNET3_MAX_TX_BUF_SIZE) + +/* max # of tx descs for a non-tso pkt */ +#define VMXNET3_MAX_TXD_PER_PKT 16 + +/* Max size of a single rx buffer */ +#define VMXNET3_MAX_RX_BUF_SIZE ((1 << 14) - 1) +/* Minimum size of a type 0 buffer */ +#define VMXNET3_MIN_T0_BUF_SIZE 128 +#define VMXNET3_MAX_CSUM_OFFSET 1024 + +/* Ring base address alignment */ +#define VMXNET3_RING_BA_ALIGN 512 +#define VMXNET3_RING_BA_MASK (VMXNET3_RING_BA_ALIGN - 1) + +/* Ring size must be a multiple of 32 */ +#define VMXNET3_RING_SIZE_ALIGN 32 +#define VMXNET3_RING_SIZE_MASK (VMXNET3_RING_SIZE_ALIGN - 1) + +/* Max ring size */ +#define VMXNET3_TX_RING_MAX_SIZE 4096 +#define VMXNET3_TC_RING_MAX_SIZE 4096 +#define VMXNET3_RX_RING_MAX_SIZE 4096 +#define VMXNET3_RC_RING_MAX_SIZE 8192 + +/* a list of reasons for queue stop */ + +enum { + VMXNET3_ERR_NOEOP = 0x80000000, /* cannot find the EOP desc of a pkt */ + VMXNET3_ERR_TXD_REUSE = 0x80000001, /* reuse TxDesc before tx completion */ + VMXNET3_ERR_BIG_PKT = 0x80000002, /* too many TxDesc for a pkt */ + VMXNET3_ERR_DESC_NOT_SPT = 0x80000003, /* descriptor type not supported */ + VMXNET3_ERR_SMALL_BUF = 0x80000004, /* type 0 buffer too small */ + VMXNET3_ERR_STRESS = 0x80000005, /* stress option firing in vmkernel */ + VMXNET3_ERR_SWITCH = 0x80000006, /* mode switch failure */ + VMXNET3_ERR_TXD_INVALID = 0x80000007, /* 
invalid TxDesc */ +}; + +/* completion descriptor types */ +#define VMXNET3_CDTYPE_TXCOMP 0 /* Tx Completion Descriptor */ +#define VMXNET3_CDTYPE_RXCOMP 3 /* Rx Completion Descriptor */ + +enum { + VMXNET3_GOS_BITS_UNK = 0, /* unknown */ + VMXNET3_GOS_BITS_32 = 1, + VMXNET3_GOS_BITS_64 = 2, +}; + +#define VMXNET3_GOS_TYPE_LINUX 1 + + +struct Vmxnet3_GOSInfo { + u32 gosBits:2; /* 32-bit or 64-bit? */ + u32 gosType:4; /* which guest */ + u32 gosVer:16; /* gos version */ + u32 gosMisc:10; /* other info about gos */ +}; + + +struct Vmxnet3_DriverInfo { + u32 version; + struct Vmxnet3_GOSInfo gos; + u32 vmxnet3RevSpt; + u32 uptVerSpt; +}; + + +#define VMXNET3_REV1_MAGIC 0xbabefee1 + +/* + * QueueDescPA must be 128 bytes aligned. It points to an array of + * Vmxnet3_TxQueueDesc followed by an array of Vmxnet3_RxQueueDesc. + * The number of Vmxnet3_TxQueueDesc/Vmxnet3_RxQueueDesc are specified by + * Vmxnet3_MiscConf.numTxQueues/numRxQueues, respectively. + */ +#define VMXNET3_QUEUE_DESC_ALIGN 128 + + +struct Vmxnet3_MiscConf { + struct Vmxnet3_DriverInfo driverInfo; + u64 uptFeatures; + u64 ddPA; /* driver data PA */ + u64 queueDescPA; /* queue descriptor table PA */ + u32 ddLen; /* driver data len */ + u32 queueDescLen; /* queue desc. table len in bytes */ + u32 mtu; + u16 maxNumRxSG; + u8 numTxQueues; + u8 numRxQueues; + u32 reserved[4]; +}; + + +struct Vmxnet3_TxQueueConf { + u64 txRingBasePA; + u64 dataRingBasePA; + u64 compRingBasePA; + u64 ddPA; /* driver data */ + u64 reserved; + u32 txRingSize; /* # of tx desc */ + u32 dataRingSize; /* # of data desc */ + u32 compRingSize; /* # of comp desc */ + u32 ddLen; /* size of driver data */ + u8 intrIdx; + u8 _pad[7]; +}; + + +struct Vmxnet3_RxQueueConf { + u64 rxRingBasePA[2]; + u64 compRingBasePA; + u64 ddPA; /* driver data */ + u64 reserved; + u32 rxRingSize[2]; /* # of rx desc */ + u32 compRingSize; /* # of rx comp desc */ + u32 ddLen; /* size of driver data */ + u8 intrIdx; + u8 _pad[7]; +}; + + +enum vmxnet3_intr_mask_mode { + VMXNET3_IMM_AUTO = 0, + VMXNET3_IMM_ACTIVE = 1, + VMXNET3_IMM_LAZY = 2 +}; + +enum vmxnet3_intr_type { + VMXNET3_IT_AUTO = 0, + VMXNET3_IT_INTX = 1, + VMXNET3_IT_MSI = 2, + VMXNET3_IT_MSIX = 3 +}; + +#define VMXNET3_MAX_TX_QUEUES 8 +#define VMXNET3_MAX_RX_QUEUES 16 +/* addition 1 for events */ +#define VMXNET3_MAX_INTRS 25 + + +struct Vmxnet3_IntrConf { + bool autoMask; + u8 numIntrs; /* # of interrupts */ + u8 eventIntrIdx; + u8 modLevels[VMXNET3_MAX_INTRS]; /* moderation level for + * each intr */ + u32 reserved[3]; +}; + +/* one bit per VLAN ID, the size is in the units of u32 */ +#define VMXNET3_VFT_SIZE (4096 / (sizeof(u32) * 8)) + + +struct Vmxnet3_QueueStatus { + bool stopped; + u8 _pad[3]; + u32 error; +}; + + +struct Vmxnet3_TxQueueCtrl { + u32 txNumDeferred; + u32 txThreshold; + u64 reserved; +}; + + +struct Vmxnet3_RxQueueCtrl { + bool updateRxProd; + u8 _pad[7]; + u64 reserved; +}; + +enum { + VMXNET3_RXM_UCAST = 0x01, /* unicast only */ + VMXNET3_RXM_MCAST = 0x02, /* multicast passing the filters */ + VMXNET3_RXM_BCAST = 0x04, /* broadcast only */ + VMXNET3_RXM_ALL_MULTI = 0x08, /* all multicast */ + VMXNET3_RXM_PROMISC = 0x10 /* promiscuous */ +}; + +struct Vmxnet3_RxFilterConf { + u32 rxMode; /* VMXNET3_RXM_xxx */ + u16 mfTableLen; /* size of the multicast filter table */ + u16 _pad1; + u64 mfTablePA; /* PA of the multicast filters table */ + u32 vfTable[VMXNET3_VFT_SIZE]; /* vlan filter */ +}; + + +#define VMXNET3_PM_MAX_FILTERS 6 +#define VMXNET3_PM_MAX_PATTERN_SIZE 128 +#define 
VMXNET3_PM_MAX_MASK_SIZE (VMXNET3_PM_MAX_PATTERN_SIZE / 8) + +#define VMXNET3_PM_WAKEUP_MAGIC 0x01 /* wake up on magic pkts */ +#define VMXNET3_PM_WAKEUP_FILTER 0x02 /* wake up on pkts matching + * filters */ + + +struct Vmxnet3_PM_PktFilter { + u8 maskSize; + u8 patternSize; + u8 mask[VMXNET3_PM_MAX_MASK_SIZE]; + u8 pattern[VMXNET3_PM_MAX_PATTERN_SIZE]; + u8 pad[6]; +}; + + +struct Vmxnet3_PMConf { + u16 wakeUpEvents; /* VMXNET3_PM_WAKEUP_xxx */ + u8 numFilters; + u8 pad[5]; + struct Vmxnet3_PM_PktFilter filters[VMXNET3_PM_MAX_FILTERS]; +}; + + +struct Vmxnet3_VariableLenConfDesc { + u32 confVer; + u32 confLen; + u64 confPA; +}; + + +struct Vmxnet3_TxQueueDesc { + struct Vmxnet3_TxQueueCtrl ctrl; + struct Vmxnet3_TxQueueConf conf; + + /* Driver read after a GET command */ + struct Vmxnet3_QueueStatus status; + struct UPT1_TxStats stats; + u8 _pad[88]; /* 128 aligned */ +}; + + +struct Vmxnet3_RxQueueDesc { + struct Vmxnet3_RxQueueCtrl ctrl; + struct Vmxnet3_RxQueueConf conf; + /* Driver read after a GET commad */ + struct Vmxnet3_QueueStatus status; + struct UPT1_RxStats stats; + u8 __pad[88]; /* 128 aligned */ +}; + + +struct Vmxnet3_DSDevRead { + /* read-only region for device, read by dev in response to a SET cmd */ + struct Vmxnet3_MiscConf misc; + struct Vmxnet3_IntrConf intrConf; + struct Vmxnet3_RxFilterConf rxFilterConf; + struct Vmxnet3_VariableLenConfDesc rssConfDesc; + struct Vmxnet3_VariableLenConfDesc pmConfDesc; + struct Vmxnet3_VariableLenConfDesc pluginConfDesc; +}; + +/* All structures in DriverShared are padded to multiples of 8 bytes */ +struct Vmxnet3_DriverShared { + u32 magic; + /* make devRead start at 64bit boundaries */ + u32 pad; + struct Vmxnet3_DSDevRead devRead; + u32 ecr; + u32 reserved[5]; +}; + + +#define VMXNET3_ECR_RQERR (1 << 0) +#define VMXNET3_ECR_TQERR (1 << 1) +#define VMXNET3_ECR_LINK (1 << 2) +#define VMXNET3_ECR_DIC (1 << 3) +#define VMXNET3_ECR_DEBUG (1 << 4) + +/* flip the gen bit of a ring */ +#define VMXNET3_FLIP_RING_GEN(gen) ((gen) = (gen) ^ 0x1) + +/* only use this if moving the idx won't affect the gen bit */ +#define VMXNET3_INC_RING_IDX_ONLY(idx, ring_size) \ + do {\ + (idx)++;\ + if (unlikely((idx) == (ring_size))) {\ + (idx) = 0;\ + } \ + } while (0) + +#define VMXNET3_SET_VFTABLE_ENTRY(vfTable, vid) \ + (vfTable[vid >> 5] |= (1 << (vid & 31))) +#define VMXNET3_CLEAR_VFTABLE_ENTRY(vfTable, vid) \ + (vfTable[vid >> 5] &= ~(1 << (vid & 31))) + +#define VMXNET3_VFTABLE_ENTRY_IS_SET(vfTable, vid) \ + ((vfTable[vid >> 5] & (1 << (vid & 31))) != 0) + +#define VMXNET3_MAX_MTU 9000 +#define VMXNET3_MIN_MTU 60 + +#define VMXNET3_LINK_UP (10000 << 16 | 1) /* 10 Gbps, up */ +#define VMXNET3_LINK_DOWN 0 + +#endif /* _VMXNET3_DEFS_H_ */ diff --git a/drivers/net/vmxnet3/vmxnet3_drv.c b/drivers/net/vmxnet3/vmxnet3_drv.c new file mode 100644 index 00000000000..6a16f76f277 --- /dev/null +++ b/drivers/net/vmxnet3/vmxnet3_drv.c @@ -0,0 +1,2565 @@ +/* + * Linux driver for VMware's vmxnet3 ethernet NIC. + * + * Copyright (C) 2008-2009, VMware, Inc. All Rights Reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; version 2 of the License and no later version. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or + * NON INFRINGEMENT. 
See the GNU General Public License for more + * details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * The full GNU General Public License is included in this distribution in + * the file called "COPYING". + * + * Maintained by: Shreyas Bhatewara <pv-drivers@vmware.com> + * + */ + +#include "vmxnet3_int.h" + +char vmxnet3_driver_name[] = "vmxnet3"; +#define VMXNET3_DRIVER_DESC "VMware vmxnet3 virtual NIC driver" + + +/* + * PCI Device ID Table + * Last entry must be all 0s + */ +static const struct pci_device_id vmxnet3_pciid_table[] = { + {PCI_VDEVICE(VMWARE, PCI_DEVICE_ID_VMWARE_VMXNET3)}, + {0} +}; + +MODULE_DEVICE_TABLE(pci, vmxnet3_pciid_table); + +static atomic_t devices_found; + + +/* + * Enable/Disable the given intr + */ +static void +vmxnet3_enable_intr(struct vmxnet3_adapter *adapter, unsigned intr_idx) +{ + VMXNET3_WRITE_BAR0_REG(adapter, VMXNET3_REG_IMR + intr_idx * 8, 0); +} + + +static void +vmxnet3_disable_intr(struct vmxnet3_adapter *adapter, unsigned intr_idx) +{ + VMXNET3_WRITE_BAR0_REG(adapter, VMXNET3_REG_IMR + intr_idx * 8, 1); +} + + +/* + * Enable/Disable all intrs used by the device + */ +static void +vmxnet3_enable_all_intrs(struct vmxnet3_adapter *adapter) +{ + int i; + + for (i = 0; i < adapter->intr.num_intrs; i++) + vmxnet3_enable_intr(adapter, i); +} + + +static void +vmxnet3_disable_all_intrs(struct vmxnet3_adapter *adapter) +{ + int i; + + for (i = 0; i < adapter->intr.num_intrs; i++) + vmxnet3_disable_intr(adapter, i); +} + + +static void +vmxnet3_ack_events(struct vmxnet3_adapter *adapter, u32 events) +{ + VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_ECR, events); +} + + +static bool +vmxnet3_tq_stopped(struct vmxnet3_tx_queue *tq, struct vmxnet3_adapter *adapter) +{ + return netif_queue_stopped(adapter->netdev); +} + + +static void +vmxnet3_tq_start(struct vmxnet3_tx_queue *tq, struct vmxnet3_adapter *adapter) +{ + tq->stopped = false; + netif_start_queue(adapter->netdev); +} + + +static void +vmxnet3_tq_wake(struct vmxnet3_tx_queue *tq, struct vmxnet3_adapter *adapter) +{ + tq->stopped = false; + netif_wake_queue(adapter->netdev); +} + + +static void +vmxnet3_tq_stop(struct vmxnet3_tx_queue *tq, struct vmxnet3_adapter *adapter) +{ + tq->stopped = true; + tq->num_stop++; + netif_stop_queue(adapter->netdev); +} + + +/* + * Check the link state. This may start or stop the tx queue. + */ +static void +vmxnet3_check_link(struct vmxnet3_adapter *adapter) +{ + u32 ret; + + VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, VMXNET3_CMD_GET_LINK); + ret = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_CMD); + adapter->link_speed = ret >> 16; + if (ret & 1) { /* Link is up. 
*/ + printk(KERN_INFO "%s: NIC Link is Up %d Mbps\n", + adapter->netdev->name, adapter->link_speed); + if (!netif_carrier_ok(adapter->netdev)) + netif_carrier_on(adapter->netdev); + + vmxnet3_tq_start(&adapter->tx_queue, adapter); + } else { + printk(KERN_INFO "%s: NIC Link is Down\n", + adapter->netdev->name); + if (netif_carrier_ok(adapter->netdev)) + netif_carrier_off(adapter->netdev); + + vmxnet3_tq_stop(&adapter->tx_queue, adapter); + } +} + + +static void +vmxnet3_process_events(struct vmxnet3_adapter *adapter) +{ + u32 events = adapter->shared->ecr; + if (!events) + return; + + vmxnet3_ack_events(adapter, events); + + /* Check if link state has changed */ + if (events & VMXNET3_ECR_LINK) + vmxnet3_check_link(adapter); + + /* Check if there is an error on xmit/recv queues */ + if (events & (VMXNET3_ECR_TQERR | VMXNET3_ECR_RQERR)) { + VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, + VMXNET3_CMD_GET_QUEUE_STATUS); + + if (adapter->tqd_start->status.stopped) { + printk(KERN_ERR "%s: tq error 0x%x\n", + adapter->netdev->name, + adapter->tqd_start->status.error); + } + if (adapter->rqd_start->status.stopped) { + printk(KERN_ERR "%s: rq error 0x%x\n", + adapter->netdev->name, + adapter->rqd_start->status.error); + } + + schedule_work(&adapter->work); + } +} + + +static void +vmxnet3_unmap_tx_buf(struct vmxnet3_tx_buf_info *tbi, + struct pci_dev *pdev) +{ + if (tbi->map_type == VMXNET3_MAP_SINGLE) + pci_unmap_single(pdev, tbi->dma_addr, tbi->len, + PCI_DMA_TODEVICE); + else if (tbi->map_type == VMXNET3_MAP_PAGE) + pci_unmap_page(pdev, tbi->dma_addr, tbi->len, + PCI_DMA_TODEVICE); + else + BUG_ON(tbi->map_type != VMXNET3_MAP_NONE); + + tbi->map_type = VMXNET3_MAP_NONE; /* to help debugging */ +} + + +static int +vmxnet3_unmap_pkt(u32 eop_idx, struct vmxnet3_tx_queue *tq, + struct pci_dev *pdev, struct vmxnet3_adapter *adapter) +{ + struct sk_buff *skb; + int entries = 0; + + /* no out of order completion */ + BUG_ON(tq->buf_info[eop_idx].sop_idx != tq->tx_ring.next2comp); + BUG_ON(tq->tx_ring.base[eop_idx].txd.eop != 1); + + skb = tq->buf_info[eop_idx].skb; + BUG_ON(skb == NULL); + tq->buf_info[eop_idx].skb = NULL; + + VMXNET3_INC_RING_IDX_ONLY(eop_idx, tq->tx_ring.size); + + while (tq->tx_ring.next2comp != eop_idx) { + vmxnet3_unmap_tx_buf(tq->buf_info + tq->tx_ring.next2comp, + pdev); + + /* update next2comp w/o tx_lock. Since we are marking more, + * instead of less, tx ring entries avail, the worst case is + * that the tx routine incorrectly re-queues a pkt due to + * insufficient tx ring entries. 
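The race described above is benign by construction: advancing next2comp without the tx lock can only make the ring look more full than it really is, never less, so the producer errs on the safe side. A toy model of why a stale consumer index is harmless (the availability formula here is an assumed simplification of the ring helpers, not the driver's exact code):

#include <stdio.h>

#define RING 8

/* Toy model: the producer computes free slots from a consumer index
 * that may be stale. A stale next2comp understates free space, so the
 * producer may back off needlessly but can never overrun the ring. */
static int desc_avail(int next2fill, int next2comp)
{
	return (next2comp > next2fill) ? next2comp - next2fill - 1
				       : RING - (next2fill - next2comp) - 1;
}

int main(void)
{
	int fresh = desc_avail(5, 3);	/* consumer already advanced to 3 */
	int stale = desc_avail(5, 2);	/* producer still sees 2 */

	printf("avail fresh=%d stale=%d (stale is never larger)\n",
	       fresh, stale);
	return 0;
}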
+ */ + vmxnet3_cmd_ring_adv_next2comp(&tq->tx_ring); + entries++; + } + + dev_kfree_skb_any(skb); + return entries; +} + + +static int +vmxnet3_tq_tx_complete(struct vmxnet3_tx_queue *tq, + struct vmxnet3_adapter *adapter) +{ + int completed = 0; + union Vmxnet3_GenericDesc *gdesc; + + gdesc = tq->comp_ring.base + tq->comp_ring.next2proc; + while (gdesc->tcd.gen == tq->comp_ring.gen) { + completed += vmxnet3_unmap_pkt(gdesc->tcd.txdIdx, tq, + adapter->pdev, adapter); + + vmxnet3_comp_ring_adv_next2proc(&tq->comp_ring); + gdesc = tq->comp_ring.base + tq->comp_ring.next2proc; + } + + if (completed) { + spin_lock(&tq->tx_lock); + if (unlikely(vmxnet3_tq_stopped(tq, adapter) && + vmxnet3_cmd_ring_desc_avail(&tq->tx_ring) > + VMXNET3_WAKE_QUEUE_THRESHOLD(tq) && + netif_carrier_ok(adapter->netdev))) { + vmxnet3_tq_wake(tq, adapter); + } + spin_unlock(&tq->tx_lock); + } + return completed; +} + + +static void +vmxnet3_tq_cleanup(struct vmxnet3_tx_queue *tq, + struct vmxnet3_adapter *adapter) +{ + int i; + + while (tq->tx_ring.next2comp != tq->tx_ring.next2fill) { + struct vmxnet3_tx_buf_info *tbi; + union Vmxnet3_GenericDesc *gdesc; + + tbi = tq->buf_info + tq->tx_ring.next2comp; + gdesc = tq->tx_ring.base + tq->tx_ring.next2comp; + + vmxnet3_unmap_tx_buf(tbi, adapter->pdev); + if (tbi->skb) { + dev_kfree_skb_any(tbi->skb); + tbi->skb = NULL; + } + vmxnet3_cmd_ring_adv_next2comp(&tq->tx_ring); + } + + /* sanity check, verify all buffers are indeed unmapped and freed */ + for (i = 0; i < tq->tx_ring.size; i++) { + BUG_ON(tq->buf_info[i].skb != NULL || + tq->buf_info[i].map_type != VMXNET3_MAP_NONE); + } + + tq->tx_ring.gen = VMXNET3_INIT_GEN; + tq->tx_ring.next2fill = tq->tx_ring.next2comp = 0; + + tq->comp_ring.gen = VMXNET3_INIT_GEN; + tq->comp_ring.next2proc = 0; +} + + +void +vmxnet3_tq_destroy(struct vmxnet3_tx_queue *tq, + struct vmxnet3_adapter *adapter) +{ + if (tq->tx_ring.base) { + pci_free_consistent(adapter->pdev, tq->tx_ring.size * + sizeof(struct Vmxnet3_TxDesc), + tq->tx_ring.base, tq->tx_ring.basePA); + tq->tx_ring.base = NULL; + } + if (tq->data_ring.base) { + pci_free_consistent(adapter->pdev, tq->data_ring.size * + sizeof(struct Vmxnet3_TxDataDesc), + tq->data_ring.base, tq->data_ring.basePA); + tq->data_ring.base = NULL; + } + if (tq->comp_ring.base) { + pci_free_consistent(adapter->pdev, tq->comp_ring.size * + sizeof(struct Vmxnet3_TxCompDesc), + tq->comp_ring.base, tq->comp_ring.basePA); + tq->comp_ring.base = NULL; + } + kfree(tq->buf_info); + tq->buf_info = NULL; +} + + +static void +vmxnet3_tq_init(struct vmxnet3_tx_queue *tq, + struct vmxnet3_adapter *adapter) +{ + int i; + + /* reset the tx ring contents to 0 and reset the tx ring states */ + memset(tq->tx_ring.base, 0, tq->tx_ring.size * + sizeof(struct Vmxnet3_TxDesc)); + tq->tx_ring.next2fill = tq->tx_ring.next2comp = 0; + tq->tx_ring.gen = VMXNET3_INIT_GEN; + + memset(tq->data_ring.base, 0, tq->data_ring.size * + sizeof(struct Vmxnet3_TxDataDesc)); + + /* reset the tx comp ring contents to 0 and reset comp ring states */ + memset(tq->comp_ring.base, 0, tq->comp_ring.size * + sizeof(struct Vmxnet3_TxCompDesc)); + tq->comp_ring.next2proc = 0; + tq->comp_ring.gen = VMXNET3_INIT_GEN; + + /* reset the bookkeeping data */ + memset(tq->buf_info, 0, sizeof(tq->buf_info[0]) * tq->tx_ring.size); + for (i = 0; i < tq->tx_ring.size; i++) + tq->buf_info[i].map_type = VMXNET3_MAP_NONE; + + /* stats are not reset */ +} + + +static int +vmxnet3_tq_create(struct vmxnet3_tx_queue *tq, + struct vmxnet3_adapter *adapter) +{ + 
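+ /* the rings and the bookkeeping array must not be allocated yet */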
BUG_ON(tq->tx_ring.base || tq->data_ring.base ||
+ tq->comp_ring.base || tq->buf_info);
+
+ tq->tx_ring.base = pci_alloc_consistent(adapter->pdev, tq->tx_ring.size
+ * sizeof(struct Vmxnet3_TxDesc),
+ &tq->tx_ring.basePA);
+ if (!tq->tx_ring.base) {
+ printk(KERN_ERR "%s: failed to allocate tx ring\n",
+ adapter->netdev->name);
+ goto err;
+ }
+
+ tq->data_ring.base = pci_alloc_consistent(adapter->pdev,
+ tq->data_ring.size *
+ sizeof(struct Vmxnet3_TxDataDesc),
+ &tq->data_ring.basePA);
+ if (!tq->data_ring.base) {
+ printk(KERN_ERR "%s: failed to allocate data ring\n",
+ adapter->netdev->name);
+ goto err;
+ }
+
+ tq->comp_ring.base = pci_alloc_consistent(adapter->pdev,
+ tq->comp_ring.size *
+ sizeof(struct Vmxnet3_TxCompDesc),
+ &tq->comp_ring.basePA);
+ if (!tq->comp_ring.base) {
+ printk(KERN_ERR "%s: failed to allocate tx comp ring\n",
+ adapter->netdev->name);
+ goto err;
+ }
+
+ tq->buf_info = kcalloc(tq->tx_ring.size, sizeof(tq->buf_info[0]),
+ GFP_KERNEL);
+ if (!tq->buf_info) {
+ printk(KERN_ERR "%s: failed to allocate tx bufinfo\n",
+ adapter->netdev->name);
+ goto err;
+ }
+
+ return 0;
+
+err:
+ vmxnet3_tq_destroy(tq, adapter);
+ return -ENOMEM;
+}
+
+
+/*
+ * starting from ring->next2fill, allocate rx buffers for the given ring
+ * of the rx queue and update the rx desc. stop after @num_to_alloc buffers
+ * are allocated or allocation fails
+ */
+
+static int
+vmxnet3_rq_alloc_rx_buf(struct vmxnet3_rx_queue *rq, u32 ring_idx,
+ int num_to_alloc, struct vmxnet3_adapter *adapter)
+{
+ int num_allocated = 0;
+ struct vmxnet3_rx_buf_info *rbi_base = rq->buf_info[ring_idx];
+ struct vmxnet3_cmd_ring *ring = &rq->rx_ring[ring_idx];
+ u32 val;
+
+ while (num_allocated < num_to_alloc) {
+ struct vmxnet3_rx_buf_info *rbi;
+ union Vmxnet3_GenericDesc *gd;
+
+ rbi = rbi_base + ring->next2fill;
+ gd = ring->base + ring->next2fill;
+
+ if (rbi->buf_type == VMXNET3_RX_BUF_SKB) {
+ if (rbi->skb == NULL) {
+ rbi->skb = dev_alloc_skb(rbi->len +
+ NET_IP_ALIGN);
+ if (unlikely(rbi->skb == NULL)) {
+ rq->stats.rx_buf_alloc_failure++;
+ break;
+ }
+ rbi->skb->dev = adapter->netdev;
+
+ skb_reserve(rbi->skb, NET_IP_ALIGN);
+ rbi->dma_addr = pci_map_single(adapter->pdev,
+ rbi->skb->data, rbi->len,
+ PCI_DMA_FROMDEVICE);
+ } else {
+ /* rx buffer skipped by the device */
+ }
+ val = VMXNET3_RXD_BTYPE_HEAD << VMXNET3_RXD_BTYPE_SHIFT;
+ } else {
+ BUG_ON(rbi->buf_type != VMXNET3_RX_BUF_PAGE ||
+ rbi->len != PAGE_SIZE);
+
+ if (rbi->page == NULL) {
+ rbi->page = alloc_page(GFP_ATOMIC);
+ if (unlikely(rbi->page == NULL)) {
+ rq->stats.rx_buf_alloc_failure++;
+ break;
+ }
+ rbi->dma_addr = pci_map_page(adapter->pdev,
+ rbi->page, 0, PAGE_SIZE,
+ PCI_DMA_FROMDEVICE);
+ } else {
+ /* rx buffers skipped by the device */
+ }
+ val = VMXNET3_RXD_BTYPE_BODY << VMXNET3_RXD_BTYPE_SHIFT;
+ }
+
+ BUG_ON(rbi->dma_addr == 0);
+ gd->rxd.addr = rbi->dma_addr;
+ gd->dword[2] = (ring->gen << VMXNET3_RXD_GEN_SHIFT) | val |
+ rbi->len;
+
+ num_allocated++;
+ vmxnet3_cmd_ring_adv_next2fill(ring);
+ }
+ rq->uncommitted[ring_idx] += num_allocated;
+
+ dprintk(KERN_ERR "alloc_rx_buf: %d allocated, next2fill %u, next2comp "
+ "%u, uncommitted %u\n", num_allocated, ring->next2fill,
+ ring->next2comp, rq->uncommitted[ring_idx]);
+
+ /* so that the device can distinguish a full ring and an empty ring */
+ BUG_ON(num_allocated != 0 && ring->next2fill == ring->next2comp);
+
+ return num_allocated;
+}
+
+
+static void
+vmxnet3_append_frag(struct sk_buff *skb, struct Vmxnet3_RxCompDesc *rcd,
+ struct vmxnet3_rx_buf_info *rbi)
+{
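+ /* chain the rx page to the skb as its next fragment */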
+ struct skb_frag_struct *frag = skb_shinfo(skb)->frags +
+ skb_shinfo(skb)->nr_frags;
+
+ BUG_ON(skb_shinfo(skb)->nr_frags >= MAX_SKB_FRAGS);
+
+ frag->page = rbi->page;
+ frag->page_offset = 0;
+ frag->size = rcd->len;
+ skb->data_len += frag->size;
+ skb_shinfo(skb)->nr_frags++;
+}
+
+
+static void
+vmxnet3_map_pkt(struct sk_buff *skb, struct vmxnet3_tx_ctx *ctx,
+ struct vmxnet3_tx_queue *tq, struct pci_dev *pdev,
+ struct vmxnet3_adapter *adapter)
+{
+ u32 dw2, len;
+ unsigned long buf_offset;
+ int i;
+ union Vmxnet3_GenericDesc *gdesc;
+ struct vmxnet3_tx_buf_info *tbi = NULL;
+
+ BUG_ON(ctx->copy_size > skb_headlen(skb));
+
+ /* use the previous gen bit for the SOP desc */
+ dw2 = (tq->tx_ring.gen ^ 0x1) << VMXNET3_TXD_GEN_SHIFT;
+
+ ctx->sop_txd = tq->tx_ring.base + tq->tx_ring.next2fill;
+ gdesc = ctx->sop_txd; /* both loops below can be skipped */
+
+ /* no need to map the buffer if headers are copied */
+ if (ctx->copy_size) {
+ ctx->sop_txd->txd.addr = tq->data_ring.basePA +
+ tq->tx_ring.next2fill *
+ sizeof(struct Vmxnet3_TxDataDesc);
+ ctx->sop_txd->dword[2] = dw2 | ctx->copy_size;
+ ctx->sop_txd->dword[3] = 0;
+
+ tbi = tq->buf_info + tq->tx_ring.next2fill;
+ tbi->map_type = VMXNET3_MAP_NONE;
+
+ dprintk(KERN_ERR "txd[%u]: 0x%Lx 0x%x 0x%x\n",
+ tq->tx_ring.next2fill, ctx->sop_txd->txd.addr,
+ ctx->sop_txd->dword[2], ctx->sop_txd->dword[3]);
+ vmxnet3_cmd_ring_adv_next2fill(&tq->tx_ring);
+
+ /* use the right gen for non-SOP desc */
+ dw2 = tq->tx_ring.gen << VMXNET3_TXD_GEN_SHIFT;
+ }
+
+ /* linear part can use multiple tx desc if it's big */
+ len = skb_headlen(skb) - ctx->copy_size;
+ buf_offset = ctx->copy_size;
+ while (len) {
+ u32 buf_size;
+
+ buf_size = len > VMXNET3_MAX_TX_BUF_SIZE ?
+ VMXNET3_MAX_TX_BUF_SIZE : len;
+
+ tbi = tq->buf_info + tq->tx_ring.next2fill;
+ tbi->map_type = VMXNET3_MAP_SINGLE;
+ tbi->dma_addr = pci_map_single(adapter->pdev,
+ skb->data + buf_offset, buf_size,
+ PCI_DMA_TODEVICE);
+
+ tbi->len = buf_size; /* this automatically converts 2^14 to 0 */
+
+ gdesc = tq->tx_ring.base + tq->tx_ring.next2fill;
+ BUG_ON(gdesc->txd.gen == tq->tx_ring.gen);
+
+ gdesc->txd.addr = tbi->dma_addr;
+ gdesc->dword[2] = dw2 | buf_size;
+ gdesc->dword[3] = 0;
+
+ dprintk(KERN_ERR "txd[%u]: 0x%Lx 0x%x 0x%x\n",
+ tq->tx_ring.next2fill, gdesc->txd.addr,
+ gdesc->dword[2], gdesc->dword[3]);
+ vmxnet3_cmd_ring_adv_next2fill(&tq->tx_ring);
+ dw2 = tq->tx_ring.gen << VMXNET3_TXD_GEN_SHIFT;
+
+ len -= buf_size;
+ buf_offset += buf_size;
+ }
+
+ for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
+ struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i];
+
+ tbi = tq->buf_info + tq->tx_ring.next2fill;
+ tbi->map_type = VMXNET3_MAP_PAGE;
+ tbi->dma_addr = pci_map_page(adapter->pdev, frag->page,
+ frag->page_offset, frag->size,
+ PCI_DMA_TODEVICE);
+
+ tbi->len = frag->size;
+
+ gdesc = tq->tx_ring.base + tq->tx_ring.next2fill;
+ BUG_ON(gdesc->txd.gen == tq->tx_ring.gen);
+
+ gdesc->txd.addr = tbi->dma_addr;
+ gdesc->dword[2] = dw2 | frag->size;
+ gdesc->dword[3] = 0;
+
+ dprintk(KERN_ERR "txd[%u]: 0x%llx %u %u\n",
+ tq->tx_ring.next2fill, gdesc->txd.addr,
+ gdesc->dword[2], gdesc->dword[3]);
+ vmxnet3_cmd_ring_adv_next2fill(&tq->tx_ring);
+ dw2 = tq->tx_ring.gen << VMXNET3_TXD_GEN_SHIFT;
+ }
+
+ ctx->eop_txd = gdesc;
+
+ /* set the last buf_info for the pkt */
+ tbi->skb = skb;
+ tbi->sop_idx = ctx->sop_txd - tq->tx_ring.base;
+}
+
+
+/*
+ * parse and copy relevant protocol headers:
+ * For a tso pkt, relevant headers are L2/3/4 including options
+ * For a pkt requesting csum offloading, they are L2/3 and may include L4
+ * if it's a TCP/UDP pkt
+ *
+ * Returns:
+ * -1: an error occurred while parsing
+ * 0: protocol headers parsed, but too big to be copied
+ * 1: protocol headers parsed and copied
+ *
+ * Other effects:
+ * 1. related *ctx fields are updated.
+ * 2. ctx->copy_size is # of bytes copied
+ * 3. the portion copied is guaranteed to be in the linear part
+ *
+ */
+static int
+vmxnet3_parse_and_copy_hdr(struct sk_buff *skb, struct vmxnet3_tx_queue *tq,
+ struct vmxnet3_tx_ctx *ctx,
+ struct vmxnet3_adapter *adapter)
+{
+ struct Vmxnet3_TxDataDesc *tdd;
+
+ if (ctx->mss) {
+ ctx->eth_ip_hdr_size = skb_transport_offset(skb);
+ ctx->l4_hdr_size = ((struct tcphdr *)
+ skb_transport_header(skb))->doff * 4;
+ ctx->copy_size = ctx->eth_ip_hdr_size + ctx->l4_hdr_size;
+ } else {
+ unsigned int pull_size;
+
+ if (skb->ip_summed == CHECKSUM_PARTIAL) {
+ ctx->eth_ip_hdr_size = skb_transport_offset(skb);
+
+ if (ctx->ipv4) {
+ struct iphdr *iph = (struct iphdr *)
+ skb_network_header(skb);
+ if (iph->protocol == IPPROTO_TCP) {
+ pull_size = ctx->eth_ip_hdr_size +
+ sizeof(struct tcphdr);
+
+ if (unlikely(!pskb_may_pull(skb,
+ pull_size))) {
+ goto err;
+ }
+ ctx->l4_hdr_size = ((struct tcphdr *)
+ skb_transport_header(skb))->doff * 4;
+ } else if (iph->protocol == IPPROTO_UDP) {
+ ctx->l4_hdr_size =
+ sizeof(struct udphdr);
+ } else {
+ ctx->l4_hdr_size = 0;
+ }
+ } else {
+ /* for simplicity, don't copy L4 headers */
+ ctx->l4_hdr_size = 0;
+ }
+ ctx->copy_size = ctx->eth_ip_hdr_size +
+ ctx->l4_hdr_size;
+ } else {
+ ctx->eth_ip_hdr_size = 0;
+ ctx->l4_hdr_size = 0;
+ /* copy as much as allowed */
+ ctx->copy_size = min((unsigned int)VMXNET3_HDR_COPY_SIZE,
+ skb_headlen(skb));
+ }
+
+ /* make sure headers are accessible directly */
+ if (unlikely(!pskb_may_pull(skb, ctx->copy_size)))
+ goto err;
+ }
+
+ if (unlikely(ctx->copy_size > VMXNET3_HDR_COPY_SIZE)) {
+ tq->stats.oversized_hdr++;
+ ctx->copy_size = 0;
+ return 0;
+ }
+
+ tdd = tq->data_ring.base + tq->tx_ring.next2fill;
+
+ memcpy(tdd->data, skb->data, ctx->copy_size);
+ dprintk(KERN_ERR "copy %u bytes to dataRing[%u]\n",
+ ctx->copy_size, tq->tx_ring.next2fill);
+ return 1;
+
+err:
+ return -1;
+}
+
+
+static void
+vmxnet3_prepare_tso(struct sk_buff *skb,
+ struct vmxnet3_tx_ctx *ctx)
+{
+ struct tcphdr *tcph = (struct tcphdr *)skb_transport_header(skb);
+ if (ctx->ipv4) {
+ struct iphdr *iph = (struct iphdr *)skb_network_header(skb);
+ iph->check = 0;
+ tcph->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr, 0,
+ IPPROTO_TCP, 0);
+ } else {
+ struct ipv6hdr *iph = (struct ipv6hdr *)skb_network_header(skb);
+ tcph->check = ~csum_ipv6_magic(&iph->saddr, &iph->daddr, 0,
+ IPPROTO_TCP, 0);
+ }
+}
+
+
+/*
+ * Transmits a pkt through a given tq
+ * Returns:
+ * NETDEV_TX_OK: descriptors are set up successfully
+ * NETDEV_TX_OK: an error occurred, the pkt is dropped
+ * NETDEV_TX_BUSY: tx ring is full, queue is stopped
+ *
+ * Side-effects:
+ * 1. tx ring may be changed
+ * 2. tq stats may be updated accordingly
+ * 3.
shared->txNumDeferred may be updated + */ + +static int +vmxnet3_tq_xmit(struct sk_buff *skb, struct vmxnet3_tx_queue *tq, + struct vmxnet3_adapter *adapter, struct net_device *netdev) +{ + int ret; + u32 count; + unsigned long flags; + struct vmxnet3_tx_ctx ctx; + union Vmxnet3_GenericDesc *gdesc; + + /* conservatively estimate # of descriptors to use */ + count = VMXNET3_TXD_NEEDED(skb_headlen(skb)) + + skb_shinfo(skb)->nr_frags + 1; + + ctx.ipv4 = (skb->protocol == __constant_ntohs(ETH_P_IP)); + + ctx.mss = skb_shinfo(skb)->gso_size; + if (ctx.mss) { + if (skb_header_cloned(skb)) { + if (unlikely(pskb_expand_head(skb, 0, 0, + GFP_ATOMIC) != 0)) { + tq->stats.drop_tso++; + goto drop_pkt; + } + tq->stats.copy_skb_header++; + } + vmxnet3_prepare_tso(skb, &ctx); + } else { + if (unlikely(count > VMXNET3_MAX_TXD_PER_PKT)) { + + /* non-tso pkts must not use more than + * VMXNET3_MAX_TXD_PER_PKT entries + */ + if (skb_linearize(skb) != 0) { + tq->stats.drop_too_many_frags++; + goto drop_pkt; + } + tq->stats.linearized++; + + /* recalculate the # of descriptors to use */ + count = VMXNET3_TXD_NEEDED(skb_headlen(skb)) + 1; + } + } + + ret = vmxnet3_parse_and_copy_hdr(skb, tq, &ctx, adapter); + if (ret >= 0) { + BUG_ON(ret <= 0 && ctx.copy_size != 0); + /* hdrs parsed, check against other limits */ + if (ctx.mss) { + if (unlikely(ctx.eth_ip_hdr_size + ctx.l4_hdr_size > + VMXNET3_MAX_TX_BUF_SIZE)) { + goto hdr_too_big; + } + } else { + if (skb->ip_summed == CHECKSUM_PARTIAL) { + if (unlikely(ctx.eth_ip_hdr_size + + skb->csum_offset > + VMXNET3_MAX_CSUM_OFFSET)) { + goto hdr_too_big; + } + } + } + } else { + tq->stats.drop_hdr_inspect_err++; + goto drop_pkt; + } + + spin_lock_irqsave(&tq->tx_lock, flags); + + if (count > vmxnet3_cmd_ring_desc_avail(&tq->tx_ring)) { + tq->stats.tx_ring_full++; + dprintk(KERN_ERR "tx queue stopped on %s, next2comp %u" + " next2fill %u\n", adapter->netdev->name, + tq->tx_ring.next2comp, tq->tx_ring.next2fill); + + vmxnet3_tq_stop(tq, adapter); + spin_unlock_irqrestore(&tq->tx_lock, flags); + return NETDEV_TX_BUSY; + } + + /* fill tx descs related to addr & len */ + vmxnet3_map_pkt(skb, &ctx, tq, adapter->pdev, adapter); + + /* setup the EOP desc */ + ctx.eop_txd->dword[3] = VMXNET3_TXD_CQ | VMXNET3_TXD_EOP; + + /* setup the SOP desc */ + gdesc = ctx.sop_txd; + if (ctx.mss) { + gdesc->txd.hlen = ctx.eth_ip_hdr_size + ctx.l4_hdr_size; + gdesc->txd.om = VMXNET3_OM_TSO; + gdesc->txd.msscof = ctx.mss; + tq->shared->txNumDeferred += (skb->len - gdesc->txd.hlen + + ctx.mss - 1) / ctx.mss; + } else { + if (skb->ip_summed == CHECKSUM_PARTIAL) { + gdesc->txd.hlen = ctx.eth_ip_hdr_size; + gdesc->txd.om = VMXNET3_OM_CSUM; + gdesc->txd.msscof = ctx.eth_ip_hdr_size + + skb->csum_offset; + } else { + gdesc->txd.om = 0; + gdesc->txd.msscof = 0; + } + tq->shared->txNumDeferred++; + } + + if (vlan_tx_tag_present(skb)) { + gdesc->txd.ti = 1; + gdesc->txd.tci = vlan_tx_tag_get(skb); + } + + wmb(); + + /* finally flips the GEN bit of the SOP desc */ + gdesc->dword[2] ^= VMXNET3_TXD_GEN; + dprintk(KERN_ERR "txd[%u]: SOP 0x%Lx 0x%x 0x%x\n", + (u32)((union Vmxnet3_GenericDesc *)ctx.sop_txd - + tq->tx_ring.base), gdesc->txd.addr, gdesc->dword[2], + gdesc->dword[3]); + + spin_unlock_irqrestore(&tq->tx_lock, flags); + + if (tq->shared->txNumDeferred >= tq->shared->txThreshold) { + tq->shared->txNumDeferred = 0; + VMXNET3_WRITE_BAR0_REG(adapter, VMXNET3_REG_TXPROD, + tq->tx_ring.next2fill); + } + netdev->trans_start = jiffies; + + return NETDEV_TX_OK; + +hdr_too_big: + 
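+ /* the copied headers exceed what the device accepts inline; count and drop */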
tq->stats.drop_oversized_hdr++; +drop_pkt: + tq->stats.drop_total++; + dev_kfree_skb(skb); + return NETDEV_TX_OK; +} + + +static netdev_tx_t +vmxnet3_xmit_frame(struct sk_buff *skb, struct net_device *netdev) +{ + struct vmxnet3_adapter *adapter = netdev_priv(netdev); + struct vmxnet3_tx_queue *tq = &adapter->tx_queue; + + return vmxnet3_tq_xmit(skb, tq, adapter, netdev); +} + + +static void +vmxnet3_rx_csum(struct vmxnet3_adapter *adapter, + struct sk_buff *skb, + union Vmxnet3_GenericDesc *gdesc) +{ + if (!gdesc->rcd.cnc && adapter->rxcsum) { + /* typical case: TCP/UDP over IP and both csums are correct */ + if ((gdesc->dword[3] & VMXNET3_RCD_CSUM_OK) == + VMXNET3_RCD_CSUM_OK) { + skb->ip_summed = CHECKSUM_UNNECESSARY; + BUG_ON(!(gdesc->rcd.tcp || gdesc->rcd.udp)); + BUG_ON(!(gdesc->rcd.v4 || gdesc->rcd.v6)); + BUG_ON(gdesc->rcd.frg); + } else { + if (gdesc->rcd.csum) { + skb->csum = htons(gdesc->rcd.csum); + skb->ip_summed = CHECKSUM_PARTIAL; + } else { + skb->ip_summed = CHECKSUM_NONE; + } + } + } else { + skb->ip_summed = CHECKSUM_NONE; + } +} + + +static void +vmxnet3_rx_error(struct vmxnet3_rx_queue *rq, struct Vmxnet3_RxCompDesc *rcd, + struct vmxnet3_rx_ctx *ctx, struct vmxnet3_adapter *adapter) +{ + rq->stats.drop_err++; + if (!rcd->fcs) + rq->stats.drop_fcs++; + + rq->stats.drop_total++; + + /* + * We do not unmap and chain the rx buffer to the skb. + * We basically pretend this buffer is not used and will be recycled + * by vmxnet3_rq_alloc_rx_buf() + */ + + /* + * ctx->skb may be NULL if this is the first and the only one + * desc for the pkt + */ + if (ctx->skb) + dev_kfree_skb_irq(ctx->skb); + + ctx->skb = NULL; +} + + +static int +vmxnet3_rq_rx_complete(struct vmxnet3_rx_queue *rq, + struct vmxnet3_adapter *adapter, int quota) +{ + static u32 rxprod_reg[2] = {VMXNET3_REG_RXPROD, VMXNET3_REG_RXPROD2}; + u32 num_rxd = 0; + struct Vmxnet3_RxCompDesc *rcd; + struct vmxnet3_rx_ctx *ctx = &rq->rx_ctx; + + rcd = &rq->comp_ring.base[rq->comp_ring.next2proc].rcd; + while (rcd->gen == rq->comp_ring.gen) { + struct vmxnet3_rx_buf_info *rbi; + struct sk_buff *skb; + int num_to_alloc; + struct Vmxnet3_RxDesc *rxd; + u32 idx, ring_idx; + + if (num_rxd >= quota) { + /* we may stop even before we see the EOP desc of + * the current pkt + */ + break; + } + num_rxd++; + + idx = rcd->rxdIdx; + ring_idx = rcd->rqID == rq->qid ? 0 : 1; + + rxd = &rq->rx_ring[ring_idx].base[idx].rxd; + rbi = rq->buf_info[ring_idx] + idx; + + BUG_ON(rxd->addr != rbi->dma_addr || rxd->len != rbi->len); + + if (unlikely(rcd->eop && rcd->err)) { + vmxnet3_rx_error(rq, rcd, ctx, adapter); + goto rcd_done; + } + + if (rcd->sop) { /* first buf of the pkt */ + BUG_ON(rxd->btype != VMXNET3_RXD_BTYPE_HEAD || + rcd->rqID != rq->qid); + + BUG_ON(rbi->buf_type != VMXNET3_RX_BUF_SKB); + BUG_ON(ctx->skb != NULL || rbi->skb == NULL); + + if (unlikely(rcd->len == 0)) { + /* Pretend the rx buffer is skipped. 
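+ * A zero-length pkt must be a sole SOP+EOP desc; leave the buffer in
+ * place to be recycled by the next refill.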
*/ + BUG_ON(!(rcd->sop && rcd->eop)); + dprintk(KERN_ERR "rxRing[%u][%u] 0 length\n", + ring_idx, idx); + goto rcd_done; + } + + ctx->skb = rbi->skb; + rbi->skb = NULL; + + pci_unmap_single(adapter->pdev, rbi->dma_addr, rbi->len, + PCI_DMA_FROMDEVICE); + + skb_put(ctx->skb, rcd->len); + } else { + BUG_ON(ctx->skb == NULL); + /* non SOP buffer must be type 1 in most cases */ + if (rbi->buf_type == VMXNET3_RX_BUF_PAGE) { + BUG_ON(rxd->btype != VMXNET3_RXD_BTYPE_BODY); + + if (rcd->len) { + pci_unmap_page(adapter->pdev, + rbi->dma_addr, rbi->len, + PCI_DMA_FROMDEVICE); + + vmxnet3_append_frag(ctx->skb, rcd, rbi); + rbi->page = NULL; + } + } else { + /* + * The only time a non-SOP buffer is type 0 is + * when it's EOP and error flag is raised, which + * has already been handled. + */ + BUG_ON(true); + } + } + + skb = ctx->skb; + if (rcd->eop) { + skb->len += skb->data_len; + skb->truesize += skb->data_len; + + vmxnet3_rx_csum(adapter, skb, + (union Vmxnet3_GenericDesc *)rcd); + skb->protocol = eth_type_trans(skb, adapter->netdev); + + if (unlikely(adapter->vlan_grp && rcd->ts)) { + vlan_hwaccel_receive_skb(skb, + adapter->vlan_grp, rcd->tci); + } else { + netif_receive_skb(skb); + } + + adapter->netdev->last_rx = jiffies; + ctx->skb = NULL; + } + +rcd_done: + /* device may skip some rx descs */ + rq->rx_ring[ring_idx].next2comp = idx; + VMXNET3_INC_RING_IDX_ONLY(rq->rx_ring[ring_idx].next2comp, + rq->rx_ring[ring_idx].size); + + /* refill rx buffers frequently to avoid starving the h/w */ + num_to_alloc = vmxnet3_cmd_ring_desc_avail(rq->rx_ring + + ring_idx); + if (unlikely(num_to_alloc > VMXNET3_RX_ALLOC_THRESHOLD(rq, + ring_idx, adapter))) { + vmxnet3_rq_alloc_rx_buf(rq, ring_idx, num_to_alloc, + adapter); + + /* if needed, update the register */ + if (unlikely(rq->shared->updateRxProd)) { + VMXNET3_WRITE_BAR0_REG(adapter, + rxprod_reg[ring_idx] + rq->qid * 8, + rq->rx_ring[ring_idx].next2fill); + rq->uncommitted[ring_idx] = 0; + } + } + + vmxnet3_comp_ring_adv_next2proc(&rq->comp_ring); + rcd = &rq->comp_ring.base[rq->comp_ring.next2proc].rcd; + } + + return num_rxd; +} + + +static void +vmxnet3_rq_cleanup(struct vmxnet3_rx_queue *rq, + struct vmxnet3_adapter *adapter) +{ + u32 i, ring_idx; + struct Vmxnet3_RxDesc *rxd; + + for (ring_idx = 0; ring_idx < 2; ring_idx++) { + for (i = 0; i < rq->rx_ring[ring_idx].size; i++) { + rxd = &rq->rx_ring[ring_idx].base[i].rxd; + + if (rxd->btype == VMXNET3_RXD_BTYPE_HEAD && + rq->buf_info[ring_idx][i].skb) { + pci_unmap_single(adapter->pdev, rxd->addr, + rxd->len, PCI_DMA_FROMDEVICE); + dev_kfree_skb(rq->buf_info[ring_idx][i].skb); + rq->buf_info[ring_idx][i].skb = NULL; + } else if (rxd->btype == VMXNET3_RXD_BTYPE_BODY && + rq->buf_info[ring_idx][i].page) { + pci_unmap_page(adapter->pdev, rxd->addr, + rxd->len, PCI_DMA_FROMDEVICE); + put_page(rq->buf_info[ring_idx][i].page); + rq->buf_info[ring_idx][i].page = NULL; + } + } + + rq->rx_ring[ring_idx].gen = VMXNET3_INIT_GEN; + rq->rx_ring[ring_idx].next2fill = + rq->rx_ring[ring_idx].next2comp = 0; + rq->uncommitted[ring_idx] = 0; + } + + rq->comp_ring.gen = VMXNET3_INIT_GEN; + rq->comp_ring.next2proc = 0; +} + + +void vmxnet3_rq_destroy(struct vmxnet3_rx_queue *rq, + struct vmxnet3_adapter *adapter) +{ + int i; + int j; + + /* all rx buffers must have already been freed */ + for (i = 0; i < 2; i++) { + if (rq->buf_info[i]) { + for (j = 0; j < rq->rx_ring[i].size; j++) + BUG_ON(rq->buf_info[i][j].page != NULL); + } + } + + + kfree(rq->buf_info[0]); + + for (i = 0; i < 2; i++) { + if (rq->rx_ring[i].base) 
{
+ pci_free_consistent(adapter->pdev, rq->rx_ring[i].size
+ * sizeof(struct Vmxnet3_RxDesc),
+ rq->rx_ring[i].base,
+ rq->rx_ring[i].basePA);
+ rq->rx_ring[i].base = NULL;
+ }
+ rq->buf_info[i] = NULL;
+ }
+
+ if (rq->comp_ring.base) {
+ pci_free_consistent(adapter->pdev, rq->comp_ring.size *
+ sizeof(struct Vmxnet3_RxCompDesc),
+ rq->comp_ring.base, rq->comp_ring.basePA);
+ rq->comp_ring.base = NULL;
+ }
+}
+
+
+static int
+vmxnet3_rq_init(struct vmxnet3_rx_queue *rq,
+ struct vmxnet3_adapter *adapter)
+{
+ int i;
+
+ /* initialize buf_info */
+ for (i = 0; i < rq->rx_ring[0].size; i++) {
+
+ /* 1st buf for a pkt is skbuff */
+ if (i % adapter->rx_buf_per_pkt == 0) {
+ rq->buf_info[0][i].buf_type = VMXNET3_RX_BUF_SKB;
+ rq->buf_info[0][i].len = adapter->skb_buf_size;
+ } else { /* subsequent bufs for a pkt are frags */
+ rq->buf_info[0][i].buf_type = VMXNET3_RX_BUF_PAGE;
+ rq->buf_info[0][i].len = PAGE_SIZE;
+ }
+ }
+ for (i = 0; i < rq->rx_ring[1].size; i++) {
+ rq->buf_info[1][i].buf_type = VMXNET3_RX_BUF_PAGE;
+ rq->buf_info[1][i].len = PAGE_SIZE;
+ }
+
+ /* reset internal state and allocate buffers for both rings */
+ for (i = 0; i < 2; i++) {
+ rq->rx_ring[i].next2fill = rq->rx_ring[i].next2comp = 0;
+ rq->uncommitted[i] = 0;
+
+ memset(rq->rx_ring[i].base, 0, rq->rx_ring[i].size *
+ sizeof(struct Vmxnet3_RxDesc));
+ rq->rx_ring[i].gen = VMXNET3_INIT_GEN;
+ }
+ if (vmxnet3_rq_alloc_rx_buf(rq, 0, rq->rx_ring[0].size - 1,
+ adapter) == 0) {
+ /* the 1st ring must have at least 1 rx buffer */
+ return -ENOMEM;
+ }
+ vmxnet3_rq_alloc_rx_buf(rq, 1, rq->rx_ring[1].size - 1, adapter);
+
+ /* reset the comp ring */
+ rq->comp_ring.next2proc = 0;
+ memset(rq->comp_ring.base, 0, rq->comp_ring.size *
+ sizeof(struct Vmxnet3_RxCompDesc));
+ rq->comp_ring.gen = VMXNET3_INIT_GEN;
+
+ /* reset rxctx */
+ rq->rx_ctx.skb = NULL;
+
+ /* stats are not reset */
+ return 0;
+}
+
+
+static int
+vmxnet3_rq_create(struct vmxnet3_rx_queue *rq, struct vmxnet3_adapter *adapter)
+{
+ int i;
+ size_t sz;
+ struct vmxnet3_rx_buf_info *bi;
+
+ for (i = 0; i < 2; i++) {
+
+ sz = rq->rx_ring[i].size * sizeof(struct Vmxnet3_RxDesc);
+ rq->rx_ring[i].base = pci_alloc_consistent(adapter->pdev, sz,
+ &rq->rx_ring[i].basePA);
+ if (!rq->rx_ring[i].base) {
+ printk(KERN_ERR "%s: failed to allocate rx ring %d\n",
+ adapter->netdev->name, i);
+ goto err;
+ }
+ }
+
+ sz = rq->comp_ring.size * sizeof(struct Vmxnet3_RxCompDesc);
+ rq->comp_ring.base = pci_alloc_consistent(adapter->pdev, sz,
+ &rq->comp_ring.basePA);
+ if (!rq->comp_ring.base) {
+ printk(KERN_ERR "%s: failed to allocate rx comp ring\n",
+ adapter->netdev->name);
+ goto err;
+ }
+
+ sz = sizeof(struct vmxnet3_rx_buf_info) * (rq->rx_ring[0].size +
+ rq->rx_ring[1].size);
+ bi = kmalloc(sz, GFP_KERNEL);
+ if (!bi) {
+ printk(KERN_ERR "%s: failed to allocate rx bufinfo\n",
+ adapter->netdev->name);
+ goto err;
+ }
+ memset(bi, 0, sz);
+ rq->buf_info[0] = bi;
+ rq->buf_info[1] = bi + rq->rx_ring[0].size;
+
+ return 0;
+
+err:
+ vmxnet3_rq_destroy(rq, adapter);
+ return -ENOMEM;
+}
+
+
+static int
+vmxnet3_do_poll(struct vmxnet3_adapter *adapter, int budget)
+{
+ if (unlikely(adapter->shared->ecr))
+ vmxnet3_process_events(adapter);
+
+ vmxnet3_tq_tx_complete(&adapter->tx_queue, adapter);
+ return vmxnet3_rq_rx_complete(&adapter->rx_queue, adapter, budget);
+}
+
+
+static int
+vmxnet3_poll(struct napi_struct *napi, int budget)
+{
+ struct vmxnet3_adapter *adapter = container_of(napi,
+ struct vmxnet3_adapter, napi);
+ int rxd_done;
+
+ rxd_done =
vmxnet3_do_poll(adapter, budget); + + if (rxd_done < budget) { + napi_complete(napi); + vmxnet3_enable_intr(adapter, 0); + } + return rxd_done; +} + + +/* Interrupt handler for vmxnet3 */ +static irqreturn_t +vmxnet3_intr(int irq, void *dev_id) +{ + struct net_device *dev = dev_id; + struct vmxnet3_adapter *adapter = netdev_priv(dev); + + if (unlikely(adapter->intr.type == VMXNET3_IT_INTX)) { + u32 icr = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_ICR); + if (unlikely(icr == 0)) + /* not ours */ + return IRQ_NONE; + } + + + /* disable intr if needed */ + if (adapter->intr.mask_mode == VMXNET3_IMM_ACTIVE) + vmxnet3_disable_intr(adapter, 0); + + napi_schedule(&adapter->napi); + + return IRQ_HANDLED; +} + +#ifdef CONFIG_NET_POLL_CONTROLLER + + +/* netpoll callback. */ +static void +vmxnet3_netpoll(struct net_device *netdev) +{ + struct vmxnet3_adapter *adapter = netdev_priv(netdev); + int irq; + +#ifdef CONFIG_PCI_MSI + if (adapter->intr.type == VMXNET3_IT_MSIX) + irq = adapter->intr.msix_entries[0].vector; + else +#endif + irq = adapter->pdev->irq; + + disable_irq(irq); + vmxnet3_intr(irq, netdev); + enable_irq(irq); +} +#endif + +static int +vmxnet3_request_irqs(struct vmxnet3_adapter *adapter) +{ + int err; + +#ifdef CONFIG_PCI_MSI + if (adapter->intr.type == VMXNET3_IT_MSIX) { + /* we only use 1 MSI-X vector */ + err = request_irq(adapter->intr.msix_entries[0].vector, + vmxnet3_intr, 0, adapter->netdev->name, + adapter->netdev); + } else +#endif + if (adapter->intr.type == VMXNET3_IT_MSI) { + err = request_irq(adapter->pdev->irq, vmxnet3_intr, 0, + adapter->netdev->name, adapter->netdev); + } else { + err = request_irq(adapter->pdev->irq, vmxnet3_intr, + IRQF_SHARED, adapter->netdev->name, + adapter->netdev); + } + + if (err) + printk(KERN_ERR "Failed to request irq %s (intr type:%d), error" + ":%d\n", adapter->netdev->name, adapter->intr.type, err); + + + if (!err) { + int i; + /* init our intr settings */ + for (i = 0; i < adapter->intr.num_intrs; i++) + adapter->intr.mod_levels[i] = UPT1_IML_ADAPTIVE; + + /* next setup intr index for all intr sources */ + adapter->tx_queue.comp_ring.intr_idx = 0; + adapter->rx_queue.comp_ring.intr_idx = 0; + adapter->intr.event_intr_idx = 0; + + printk(KERN_INFO "%s: intr type %u, mode %u, %u vectors " + "allocated\n", adapter->netdev->name, adapter->intr.type, + adapter->intr.mask_mode, adapter->intr.num_intrs); + } + + return err; +} + + +static void +vmxnet3_free_irqs(struct vmxnet3_adapter *adapter) +{ + BUG_ON(adapter->intr.type == VMXNET3_IT_AUTO || + adapter->intr.num_intrs <= 0); + + switch (adapter->intr.type) { +#ifdef CONFIG_PCI_MSI + case VMXNET3_IT_MSIX: + { + int i; + + for (i = 0; i < adapter->intr.num_intrs; i++) + free_irq(adapter->intr.msix_entries[i].vector, + adapter->netdev); + break; + } +#endif + case VMXNET3_IT_MSI: + free_irq(adapter->pdev->irq, adapter->netdev); + break; + case VMXNET3_IT_INTX: + free_irq(adapter->pdev->irq, adapter->netdev); + break; + default: + BUG_ON(true); + } +} + + +static void +vmxnet3_vlan_rx_register(struct net_device *netdev, struct vlan_group *grp) +{ + struct vmxnet3_adapter *adapter = netdev_priv(netdev); + struct Vmxnet3_DriverShared *shared = adapter->shared; + u32 *vfTable = adapter->shared->devRead.rxFilterConf.vfTable; + + if (grp) { + /* add vlan rx stripping. 
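+ * i.e. program the device to strip VLAN tags on rx and turn on filtering.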
*/ + if (adapter->netdev->features & NETIF_F_HW_VLAN_RX) { + int i; + struct Vmxnet3_DSDevRead *devRead = &shared->devRead; + adapter->vlan_grp = grp; + + /* update FEATURES to device */ + devRead->misc.uptFeatures |= UPT1_F_RXVLAN; + VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, + VMXNET3_CMD_UPDATE_FEATURE); + /* + * Clear entire vfTable; then enable untagged pkts. + * Note: setting one entry in vfTable to non-zero turns + * on VLAN rx filtering. + */ + for (i = 0; i < VMXNET3_VFT_SIZE; i++) + vfTable[i] = 0; + + VMXNET3_SET_VFTABLE_ENTRY(vfTable, 0); + VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, + VMXNET3_CMD_UPDATE_VLAN_FILTERS); + } else { + printk(KERN_ERR "%s: vlan_rx_register when device has " + "no NETIF_F_HW_VLAN_RX\n", netdev->name); + } + } else { + /* remove vlan rx stripping. */ + struct Vmxnet3_DSDevRead *devRead = &shared->devRead; + adapter->vlan_grp = NULL; + + if (devRead->misc.uptFeatures & UPT1_F_RXVLAN) { + int i; + + for (i = 0; i < VMXNET3_VFT_SIZE; i++) { + /* clear entire vfTable; this also disables + * VLAN rx filtering + */ + vfTable[i] = 0; + } + VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, + VMXNET3_CMD_UPDATE_VLAN_FILTERS); + + /* update FEATURES to device */ + devRead->misc.uptFeatures &= ~UPT1_F_RXVLAN; + VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, + VMXNET3_CMD_UPDATE_FEATURE); + } + } +} + + +static void +vmxnet3_restore_vlan(struct vmxnet3_adapter *adapter) +{ + if (adapter->vlan_grp) { + u16 vid; + u32 *vfTable = adapter->shared->devRead.rxFilterConf.vfTable; + bool activeVlan = false; + + for (vid = 0; vid < VLAN_GROUP_ARRAY_LEN; vid++) { + if (vlan_group_get_device(adapter->vlan_grp, vid)) { + VMXNET3_SET_VFTABLE_ENTRY(vfTable, vid); + activeVlan = true; + } + } + if (activeVlan) { + /* continue to allow untagged pkts */ + VMXNET3_SET_VFTABLE_ENTRY(vfTable, 0); + } + } +} + + +static void +vmxnet3_vlan_rx_add_vid(struct net_device *netdev, u16 vid) +{ + struct vmxnet3_adapter *adapter = netdev_priv(netdev); + u32 *vfTable = adapter->shared->devRead.rxFilterConf.vfTable; + + VMXNET3_SET_VFTABLE_ENTRY(vfTable, vid); + VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, + VMXNET3_CMD_UPDATE_VLAN_FILTERS); +} + + +static void +vmxnet3_vlan_rx_kill_vid(struct net_device *netdev, u16 vid) +{ + struct vmxnet3_adapter *adapter = netdev_priv(netdev); + u32 *vfTable = adapter->shared->devRead.rxFilterConf.vfTable; + + VMXNET3_CLEAR_VFTABLE_ENTRY(vfTable, vid); + VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, + VMXNET3_CMD_UPDATE_VLAN_FILTERS); +} + + +static u8 * +vmxnet3_copy_mc(struct net_device *netdev) +{ + u8 *buf = NULL; + u32 sz = netdev->mc_count * ETH_ALEN; + + /* struct Vmxnet3_RxFilterConf.mfTableLen is u16. 
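+ * so the copied multicast list must fit in 64KB.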
*/ + if (sz <= 0xffff) { + /* We may be called with BH disabled */ + buf = kmalloc(sz, GFP_ATOMIC); + if (buf) { + int i; + struct dev_mc_list *mc = netdev->mc_list; + + for (i = 0; i < netdev->mc_count; i++) { + BUG_ON(!mc); + memcpy(buf + i * ETH_ALEN, mc->dmi_addr, + ETH_ALEN); + mc = mc->next; + } + } + } + return buf; +} + + +static void +vmxnet3_set_mc(struct net_device *netdev) +{ + struct vmxnet3_adapter *adapter = netdev_priv(netdev); + struct Vmxnet3_RxFilterConf *rxConf = + &adapter->shared->devRead.rxFilterConf; + u8 *new_table = NULL; + u32 new_mode = VMXNET3_RXM_UCAST; + + if (netdev->flags & IFF_PROMISC) + new_mode |= VMXNET3_RXM_PROMISC; + + if (netdev->flags & IFF_BROADCAST) + new_mode |= VMXNET3_RXM_BCAST; + + if (netdev->flags & IFF_ALLMULTI) + new_mode |= VMXNET3_RXM_ALL_MULTI; + else + if (netdev->mc_count > 0) { + new_table = vmxnet3_copy_mc(netdev); + if (new_table) { + new_mode |= VMXNET3_RXM_MCAST; + rxConf->mfTableLen = netdev->mc_count * + ETH_ALEN; + rxConf->mfTablePA = virt_to_phys(new_table); + } else { + printk(KERN_INFO "%s: failed to copy mcast list" + ", setting ALL_MULTI\n", netdev->name); + new_mode |= VMXNET3_RXM_ALL_MULTI; + } + } + + + if (!(new_mode & VMXNET3_RXM_MCAST)) { + rxConf->mfTableLen = 0; + rxConf->mfTablePA = 0; + } + + if (new_mode != rxConf->rxMode) { + rxConf->rxMode = new_mode; + VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, + VMXNET3_CMD_UPDATE_RX_MODE); + } + + VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, + VMXNET3_CMD_UPDATE_MAC_FILTERS); + + kfree(new_table); +} + + +/* + * Set up driver_shared based on settings in adapter. + */ + +static void +vmxnet3_setup_driver_shared(struct vmxnet3_adapter *adapter) +{ + struct Vmxnet3_DriverShared *shared = adapter->shared; + struct Vmxnet3_DSDevRead *devRead = &shared->devRead; + struct Vmxnet3_TxQueueConf *tqc; + struct Vmxnet3_RxQueueConf *rqc; + int i; + + memset(shared, 0, sizeof(*shared)); + + /* driver settings */ + shared->magic = VMXNET3_REV1_MAGIC; + devRead->misc.driverInfo.version = VMXNET3_DRIVER_VERSION_NUM; + devRead->misc.driverInfo.gos.gosBits = (sizeof(void *) == 4 ? 
+ VMXNET3_GOS_BITS_32 : VMXNET3_GOS_BITS_64); + devRead->misc.driverInfo.gos.gosType = VMXNET3_GOS_TYPE_LINUX; + devRead->misc.driverInfo.vmxnet3RevSpt = 1; + devRead->misc.driverInfo.uptVerSpt = 1; + + devRead->misc.ddPA = virt_to_phys(adapter); + devRead->misc.ddLen = sizeof(struct vmxnet3_adapter); + + /* set up feature flags */ + if (adapter->rxcsum) + devRead->misc.uptFeatures |= UPT1_F_RXCSUM; + + if (adapter->lro) { + devRead->misc.uptFeatures |= UPT1_F_LRO; + devRead->misc.maxNumRxSG = 1 + MAX_SKB_FRAGS; + } + if ((adapter->netdev->features & NETIF_F_HW_VLAN_RX) + && adapter->vlan_grp) { + devRead->misc.uptFeatures |= UPT1_F_RXVLAN; + } + + devRead->misc.mtu = adapter->netdev->mtu; + devRead->misc.queueDescPA = adapter->queue_desc_pa; + devRead->misc.queueDescLen = sizeof(struct Vmxnet3_TxQueueDesc) + + sizeof(struct Vmxnet3_RxQueueDesc); + + /* tx queue settings */ + BUG_ON(adapter->tx_queue.tx_ring.base == NULL); + + devRead->misc.numTxQueues = 1; + tqc = &adapter->tqd_start->conf; + tqc->txRingBasePA = adapter->tx_queue.tx_ring.basePA; + tqc->dataRingBasePA = adapter->tx_queue.data_ring.basePA; + tqc->compRingBasePA = adapter->tx_queue.comp_ring.basePA; + tqc->ddPA = virt_to_phys(adapter->tx_queue.buf_info); + tqc->txRingSize = adapter->tx_queue.tx_ring.size; + tqc->dataRingSize = adapter->tx_queue.data_ring.size; + tqc->compRingSize = adapter->tx_queue.comp_ring.size; + tqc->ddLen = sizeof(struct vmxnet3_tx_buf_info) * + tqc->txRingSize; + tqc->intrIdx = adapter->tx_queue.comp_ring.intr_idx; + + /* rx queue settings */ + devRead->misc.numRxQueues = 1; + rqc = &adapter->rqd_start->conf; + rqc->rxRingBasePA[0] = adapter->rx_queue.rx_ring[0].basePA; + rqc->rxRingBasePA[1] = adapter->rx_queue.rx_ring[1].basePA; + rqc->compRingBasePA = adapter->rx_queue.comp_ring.basePA; + rqc->ddPA = virt_to_phys(adapter->rx_queue.buf_info); + rqc->rxRingSize[0] = adapter->rx_queue.rx_ring[0].size; + rqc->rxRingSize[1] = adapter->rx_queue.rx_ring[1].size; + rqc->compRingSize = adapter->rx_queue.comp_ring.size; + rqc->ddLen = sizeof(struct vmxnet3_rx_buf_info) * + (rqc->rxRingSize[0] + rqc->rxRingSize[1]); + rqc->intrIdx = adapter->rx_queue.comp_ring.intr_idx; + + /* intr settings */ + devRead->intrConf.autoMask = adapter->intr.mask_mode == + VMXNET3_IMM_AUTO; + devRead->intrConf.numIntrs = adapter->intr.num_intrs; + for (i = 0; i < adapter->intr.num_intrs; i++) + devRead->intrConf.modLevels[i] = adapter->intr.mod_levels[i]; + + devRead->intrConf.eventIntrIdx = adapter->intr.event_intr_idx; + + /* rx filter settings */ + devRead->rxFilterConf.rxMode = 0; + vmxnet3_restore_vlan(adapter); + /* the rest are already zeroed */ +} + + +int +vmxnet3_activate_dev(struct vmxnet3_adapter *adapter) +{ + int err; + u32 ret; + + dprintk(KERN_ERR "%s: skb_buf_size %d, rx_buf_per_pkt %d, ring sizes" + " %u %u %u\n", adapter->netdev->name, adapter->skb_buf_size, + adapter->rx_buf_per_pkt, adapter->tx_queue.tx_ring.size, + adapter->rx_queue.rx_ring[0].size, + adapter->rx_queue.rx_ring[1].size); + + vmxnet3_tq_init(&adapter->tx_queue, adapter); + err = vmxnet3_rq_init(&adapter->rx_queue, adapter); + if (err) { + printk(KERN_ERR "Failed to init rx queue for %s: error %d\n", + adapter->netdev->name, err); + goto rq_err; + } + + err = vmxnet3_request_irqs(adapter); + if (err) { + printk(KERN_ERR "Failed to setup irq for %s: error %d\n", + adapter->netdev->name, err); + goto irq_err; + } + + vmxnet3_setup_driver_shared(adapter); + + VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_DSAL, + 
VMXNET3_GET_ADDR_LO(adapter->shared_pa));
+ VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_DSAH,
+ VMXNET3_GET_ADDR_HI(adapter->shared_pa));
+
+ VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
+ VMXNET3_CMD_ACTIVATE_DEV);
+ ret = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_CMD);
+
+ if (ret != 0) {
+ printk(KERN_ERR "Failed to activate dev %s: error %u\n",
+ adapter->netdev->name, ret);
+ err = -EINVAL;
+ goto activate_err;
+ }
+ VMXNET3_WRITE_BAR0_REG(adapter, VMXNET3_REG_RXPROD,
+ adapter->rx_queue.rx_ring[0].next2fill);
+ VMXNET3_WRITE_BAR0_REG(adapter, VMXNET3_REG_RXPROD2,
+ adapter->rx_queue.rx_ring[1].next2fill);
+
+ /* Apply the rx filter settings last. */
+ vmxnet3_set_mc(adapter->netdev);
+
+ /*
+ * Check link state when first activating device. It will start the
+ * tx queue if the link is up.
+ */
+ vmxnet3_check_link(adapter);
+
+ napi_enable(&adapter->napi);
+ vmxnet3_enable_all_intrs(adapter);
+ clear_bit(VMXNET3_STATE_BIT_QUIESCED, &adapter->state);
+ return 0;
+
+activate_err:
+ VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_DSAL, 0);
+ VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_DSAH, 0);
+ vmxnet3_free_irqs(adapter);
+irq_err:
+rq_err:
+ /* free up buffers we allocated */
+ vmxnet3_rq_cleanup(&adapter->rx_queue, adapter);
+ return err;
+}
+
+
+void
+vmxnet3_reset_dev(struct vmxnet3_adapter *adapter)
+{
+ VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, VMXNET3_CMD_RESET_DEV);
+}
+
+
+int
+vmxnet3_quiesce_dev(struct vmxnet3_adapter *adapter)
+{
+ if (test_and_set_bit(VMXNET3_STATE_BIT_QUIESCED, &adapter->state))
+ return 0;
+
+
+ VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
+ VMXNET3_CMD_QUIESCE_DEV);
+ vmxnet3_disable_all_intrs(adapter);
+
+ napi_disable(&adapter->napi);
+ netif_tx_disable(adapter->netdev);
+ adapter->link_speed = 0;
+ netif_carrier_off(adapter->netdev);
+
+ vmxnet3_tq_cleanup(&adapter->tx_queue, adapter);
+ vmxnet3_rq_cleanup(&adapter->rx_queue, adapter);
+ vmxnet3_free_irqs(adapter);
+ return 0;
+}
+
+
+static void
+vmxnet3_write_mac_addr(struct vmxnet3_adapter *adapter, u8 *mac)
+{
+ u32 tmp;
+
+ tmp = *(u32 *)mac;
+ VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_MACL, tmp);
+
+ tmp = (mac[5] << 8) | mac[4];
+ VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_MACH, tmp);
+}
+
+
+static int
+vmxnet3_set_mac_addr(struct net_device *netdev, void *p)
+{
+ struct sockaddr *addr = p;
+ struct vmxnet3_adapter *adapter = netdev_priv(netdev);
+
+ memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
+ vmxnet3_write_mac_addr(adapter, addr->sa_data);
+
+ return 0;
+}
+
+
+/* ==================== initialization and cleanup routines ============ */
+
+static int
+vmxnet3_alloc_pci_resources(struct vmxnet3_adapter *adapter, bool *dma64)
+{
+ int err;
+ unsigned long mmio_start, mmio_len;
+ struct pci_dev *pdev = adapter->pdev;
+
+ err = pci_enable_device(pdev);
+ if (err) {
+ printk(KERN_ERR "Failed to enable adapter %s: error %d\n",
+ pci_name(pdev), err);
+ return err;
+ }
+
+ if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) == 0) {
+ if (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)) != 0) {
+ printk(KERN_ERR "pci_set_consistent_dma_mask failed "
+ "for adapter %s\n", pci_name(pdev));
+ err = -EIO;
+ goto err_set_mask;
+ }
+ *dma64 = true;
+ } else {
+ if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) != 0) {
+ printk(KERN_ERR "pci_set_dma_mask failed for adapter "
+ "%s\n", pci_name(pdev));
+ err = -EIO;
+ goto err_set_mask;
+ }
+ *dma64 = false;
+ }
+
+ err = pci_request_selected_regions(pdev, (1 << 2) - 1,
+ vmxnet3_driver_name);
+ if (err) {
+ printk(KERN_ERR "Failed to
request region for adapter %s: " + "error %d\n", pci_name(pdev), err); + goto err_set_mask; + } + + pci_set_master(pdev); + + mmio_start = pci_resource_start(pdev, 0); + mmio_len = pci_resource_len(pdev, 0); + adapter->hw_addr0 = ioremap(mmio_start, mmio_len); + if (!adapter->hw_addr0) { + printk(KERN_ERR "Failed to map bar0 for adapter %s\n", + pci_name(pdev)); + err = -EIO; + goto err_ioremap; + } + + mmio_start = pci_resource_start(pdev, 1); + mmio_len = pci_resource_len(pdev, 1); + adapter->hw_addr1 = ioremap(mmio_start, mmio_len); + if (!adapter->hw_addr1) { + printk(KERN_ERR "Failed to map bar1 for adapter %s\n", + pci_name(pdev)); + err = -EIO; + goto err_bar1; + } + return 0; + +err_bar1: + iounmap(adapter->hw_addr0); +err_ioremap: + pci_release_selected_regions(pdev, (1 << 2) - 1); +err_set_mask: + pci_disable_device(pdev); + return err; +} + + +static void +vmxnet3_free_pci_resources(struct vmxnet3_adapter *adapter) +{ + BUG_ON(!adapter->pdev); + + iounmap(adapter->hw_addr0); + iounmap(adapter->hw_addr1); + pci_release_selected_regions(adapter->pdev, (1 << 2) - 1); + pci_disable_device(adapter->pdev); +} + + +static void +vmxnet3_adjust_rx_ring_size(struct vmxnet3_adapter *adapter) +{ + size_t sz; + + if (adapter->netdev->mtu <= VMXNET3_MAX_SKB_BUF_SIZE - + VMXNET3_MAX_ETH_HDR_SIZE) { + adapter->skb_buf_size = adapter->netdev->mtu + + VMXNET3_MAX_ETH_HDR_SIZE; + if (adapter->skb_buf_size < VMXNET3_MIN_T0_BUF_SIZE) + adapter->skb_buf_size = VMXNET3_MIN_T0_BUF_SIZE; + + adapter->rx_buf_per_pkt = 1; + } else { + adapter->skb_buf_size = VMXNET3_MAX_SKB_BUF_SIZE; + sz = adapter->netdev->mtu - VMXNET3_MAX_SKB_BUF_SIZE + + VMXNET3_MAX_ETH_HDR_SIZE; + adapter->rx_buf_per_pkt = 1 + (sz + PAGE_SIZE - 1) / PAGE_SIZE; + } + + /* + * for simplicity, force the ring0 size to be a multiple of + * rx_buf_per_pkt * VMXNET3_RING_SIZE_ALIGN + */ + sz = adapter->rx_buf_per_pkt * VMXNET3_RING_SIZE_ALIGN; + adapter->rx_queue.rx_ring[0].size = (adapter->rx_queue.rx_ring[0].size + + sz - 1) / sz * sz; + adapter->rx_queue.rx_ring[0].size = min_t(u32, + adapter->rx_queue.rx_ring[0].size, + VMXNET3_RX_RING_MAX_SIZE / sz * sz); +} + + +int +vmxnet3_create_queues(struct vmxnet3_adapter *adapter, u32 tx_ring_size, + u32 rx_ring_size, u32 rx_ring2_size) +{ + int err; + + adapter->tx_queue.tx_ring.size = tx_ring_size; + adapter->tx_queue.data_ring.size = tx_ring_size; + adapter->tx_queue.comp_ring.size = tx_ring_size; + adapter->tx_queue.shared = &adapter->tqd_start->ctrl; + adapter->tx_queue.stopped = true; + err = vmxnet3_tq_create(&adapter->tx_queue, adapter); + if (err) + return err; + + adapter->rx_queue.rx_ring[0].size = rx_ring_size; + adapter->rx_queue.rx_ring[1].size = rx_ring2_size; + vmxnet3_adjust_rx_ring_size(adapter); + adapter->rx_queue.comp_ring.size = adapter->rx_queue.rx_ring[0].size + + adapter->rx_queue.rx_ring[1].size; + adapter->rx_queue.qid = 0; + adapter->rx_queue.qid2 = 1; + adapter->rx_queue.shared = &adapter->rqd_start->ctrl; + err = vmxnet3_rq_create(&adapter->rx_queue, adapter); + if (err) + vmxnet3_tq_destroy(&adapter->tx_queue, adapter); + + return err; +} + +static int +vmxnet3_open(struct net_device *netdev) +{ + struct vmxnet3_adapter *adapter; + int err; + + adapter = netdev_priv(netdev); + + spin_lock_init(&adapter->tx_queue.tx_lock); + + err = vmxnet3_create_queues(adapter, VMXNET3_DEF_TX_RING_SIZE, + VMXNET3_DEF_RX_RING_SIZE, + VMXNET3_DEF_RX_RING_SIZE); + if (err) + goto queue_err; + + err = vmxnet3_activate_dev(adapter); + if (err) + goto activate_err; + + return 0; + 
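+ /* unwind queue creation if device activation fails */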
+activate_err: + vmxnet3_rq_destroy(&adapter->rx_queue, adapter); + vmxnet3_tq_destroy(&adapter->tx_queue, adapter); +queue_err: + return err; +} + + +static int +vmxnet3_close(struct net_device *netdev) +{ + struct vmxnet3_adapter *adapter = netdev_priv(netdev); + + /* + * Reset_work may be in the middle of resetting the device, wait for its + * completion. + */ + while (test_and_set_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state)) + msleep(1); + + vmxnet3_quiesce_dev(adapter); + + vmxnet3_rq_destroy(&adapter->rx_queue, adapter); + vmxnet3_tq_destroy(&adapter->tx_queue, adapter); + + clear_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state); + + + return 0; +} + + +void +vmxnet3_force_close(struct vmxnet3_adapter *adapter) +{ + /* + * we must clear VMXNET3_STATE_BIT_RESETTING, otherwise + * vmxnet3_close() will deadlock. + */ + BUG_ON(test_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state)); + + /* we need to enable NAPI, otherwise dev_close will deadlock */ + napi_enable(&adapter->napi); + dev_close(adapter->netdev); +} + + +static int +vmxnet3_change_mtu(struct net_device *netdev, int new_mtu) +{ + struct vmxnet3_adapter *adapter = netdev_priv(netdev); + int err = 0; + + if (new_mtu < VMXNET3_MIN_MTU || new_mtu > VMXNET3_MAX_MTU) + return -EINVAL; + + if (new_mtu > 1500 && !adapter->jumbo_frame) + return -EINVAL; + + netdev->mtu = new_mtu; + + /* + * Reset_work may be in the middle of resetting the device, wait for its + * completion. + */ + while (test_and_set_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state)) + msleep(1); + + if (netif_running(netdev)) { + vmxnet3_quiesce_dev(adapter); + vmxnet3_reset_dev(adapter); + + /* we need to re-create the rx queue based on the new mtu */ + vmxnet3_rq_destroy(&adapter->rx_queue, adapter); + vmxnet3_adjust_rx_ring_size(adapter); + adapter->rx_queue.comp_ring.size = + adapter->rx_queue.rx_ring[0].size + + adapter->rx_queue.rx_ring[1].size; + err = vmxnet3_rq_create(&adapter->rx_queue, adapter); + if (err) { + printk(KERN_ERR "%s: failed to re-create rx queue," + " error %d. Closing it.\n", netdev->name, err); + goto out; + } + + err = vmxnet3_activate_dev(adapter); + if (err) { + printk(KERN_ERR "%s: failed to re-activate, error %d. 
" + "Closing it\n", netdev->name, err); + goto out; + } + } + +out: + clear_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state); + if (err) + vmxnet3_force_close(adapter); + + return err; +} + + +static void +vmxnet3_declare_features(struct vmxnet3_adapter *adapter, bool dma64) +{ + struct net_device *netdev = adapter->netdev; + + netdev->features = NETIF_F_SG | + NETIF_F_HW_CSUM | + NETIF_F_HW_VLAN_TX | + NETIF_F_HW_VLAN_RX | + NETIF_F_HW_VLAN_FILTER | + NETIF_F_TSO | + NETIF_F_TSO6 | + NETIF_F_LRO; + + printk(KERN_INFO "features: sg csum vlan jf tso tsoIPv6 lro"); + + adapter->rxcsum = true; + adapter->jumbo_frame = true; + adapter->lro = true; + + if (dma64) { + netdev->features |= NETIF_F_HIGHDMA; + printk(" highDMA"); + } + + netdev->vlan_features = netdev->features; + printk("\n"); +} + + +static void +vmxnet3_read_mac_addr(struct vmxnet3_adapter *adapter, u8 *mac) +{ + u32 tmp; + + tmp = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_MACL); + *(u32 *)mac = tmp; + + tmp = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_MACH); + mac[4] = tmp & 0xff; + mac[5] = (tmp >> 8) & 0xff; +} + + +static void +vmxnet3_alloc_intr_resources(struct vmxnet3_adapter *adapter) +{ + u32 cfg; + + /* intr settings */ + VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, + VMXNET3_CMD_GET_CONF_INTR); + cfg = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_CMD); + adapter->intr.type = cfg & 0x3; + adapter->intr.mask_mode = (cfg >> 2) & 0x3; + + if (adapter->intr.type == VMXNET3_IT_AUTO) { + int err; + +#ifdef CONFIG_PCI_MSI + adapter->intr.msix_entries[0].entry = 0; + err = pci_enable_msix(adapter->pdev, adapter->intr.msix_entries, + VMXNET3_LINUX_MAX_MSIX_VECT); + if (!err) { + adapter->intr.num_intrs = 1; + adapter->intr.type = VMXNET3_IT_MSIX; + return; + } +#endif + + err = pci_enable_msi(adapter->pdev); + if (!err) { + adapter->intr.num_intrs = 1; + adapter->intr.type = VMXNET3_IT_MSI; + return; + } + } + + adapter->intr.type = VMXNET3_IT_INTX; + + /* INT-X related setting */ + adapter->intr.num_intrs = 1; +} + + +static void +vmxnet3_free_intr_resources(struct vmxnet3_adapter *adapter) +{ + if (adapter->intr.type == VMXNET3_IT_MSIX) + pci_disable_msix(adapter->pdev); + else if (adapter->intr.type == VMXNET3_IT_MSI) + pci_disable_msi(adapter->pdev); + else + BUG_ON(adapter->intr.type != VMXNET3_IT_INTX); +} + + +static void +vmxnet3_tx_timeout(struct net_device *netdev) +{ + struct vmxnet3_adapter *adapter = netdev_priv(netdev); + adapter->tx_timeout_count++; + + printk(KERN_ERR "%s: tx hang\n", adapter->netdev->name); + schedule_work(&adapter->work); +} + + +static void +vmxnet3_reset_work(struct work_struct *data) +{ + struct vmxnet3_adapter *adapter; + + adapter = container_of(data, struct vmxnet3_adapter, work); + + /* if another thread is resetting the device, no need to proceed */ + if (test_and_set_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state)) + return; + + /* if the device is closed, we must leave it alone */ + if (netif_running(adapter->netdev)) { + printk(KERN_INFO "%s: resetting\n", adapter->netdev->name); + vmxnet3_quiesce_dev(adapter); + vmxnet3_reset_dev(adapter); + vmxnet3_activate_dev(adapter); + } else { + printk(KERN_INFO "%s: already closed\n", adapter->netdev->name); + } + + clear_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state); +} + + +static int __devinit +vmxnet3_probe_device(struct pci_dev *pdev, + const struct pci_device_id *id) +{ + static const struct net_device_ops vmxnet3_netdev_ops = { + .ndo_open = vmxnet3_open, + .ndo_stop = vmxnet3_close, + .ndo_start_xmit = vmxnet3_xmit_frame, + 
.ndo_set_mac_address = vmxnet3_set_mac_addr, + .ndo_change_mtu = vmxnet3_change_mtu, + .ndo_get_stats = vmxnet3_get_stats, + .ndo_tx_timeout = vmxnet3_tx_timeout, + .ndo_set_multicast_list = vmxnet3_set_mc, + .ndo_vlan_rx_register = vmxnet3_vlan_rx_register, + .ndo_vlan_rx_add_vid = vmxnet3_vlan_rx_add_vid, + .ndo_vlan_rx_kill_vid = vmxnet3_vlan_rx_kill_vid, +#ifdef CONFIG_NET_POLL_CONTROLLER + .ndo_poll_controller = vmxnet3_netpoll, +#endif + }; + int err; + bool dma64 = false; /* stupid gcc */ + u32 ver; + struct net_device *netdev; + struct vmxnet3_adapter *adapter; + u8 mac[ETH_ALEN]; + + netdev = alloc_etherdev(sizeof(struct vmxnet3_adapter)); + if (!netdev) { + printk(KERN_ERR "Failed to alloc ethernet device for adapter " + "%s\n", pci_name(pdev)); + return -ENOMEM; + } + + pci_set_drvdata(pdev, netdev); + adapter = netdev_priv(netdev); + adapter->netdev = netdev; + adapter->pdev = pdev; + + adapter->shared = pci_alloc_consistent(adapter->pdev, + sizeof(struct Vmxnet3_DriverShared), + &adapter->shared_pa); + if (!adapter->shared) { + printk(KERN_ERR "Failed to allocate memory for %s\n", + pci_name(pdev)); + err = -ENOMEM; + goto err_alloc_shared; + } + + adapter->tqd_start = pci_alloc_consistent(adapter->pdev, + sizeof(struct Vmxnet3_TxQueueDesc) + + sizeof(struct Vmxnet3_RxQueueDesc), + &adapter->queue_desc_pa); + + if (!adapter->tqd_start) { + printk(KERN_ERR "Failed to allocate memory for %s\n", + pci_name(pdev)); + err = -ENOMEM; + goto err_alloc_queue_desc; + } + adapter->rqd_start = (struct Vmxnet3_RxQueueDesc *)(adapter->tqd_start + + 1); + + adapter->pm_conf = kmalloc(sizeof(struct Vmxnet3_PMConf), GFP_KERNEL); + if (adapter->pm_conf == NULL) { + printk(KERN_ERR "Failed to allocate memory for %s\n", + pci_name(pdev)); + err = -ENOMEM; + goto err_alloc_pm; + } + + err = vmxnet3_alloc_pci_resources(adapter, &dma64); + if (err < 0) + goto err_alloc_pci; + + ver = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_VRRS); + if (ver & 1) { + VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_VRRS, 1); + } else { + printk(KERN_ERR "Incompatible h/w version (0x%x) for adapter" + " %s\n", ver, pci_name(pdev)); + err = -EBUSY; + goto err_ver; + } + + ver = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_UVRS); + if (ver & 1) { + VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_UVRS, 1); + } else { + printk(KERN_ERR "Incompatible upt version (0x%x) for " + "adapter %s\n", ver, pci_name(pdev)); + err = -EBUSY; + goto err_ver; + } + + vmxnet3_declare_features(adapter, dma64); + + adapter->dev_number = atomic_read(&devices_found); + vmxnet3_alloc_intr_resources(adapter); + + vmxnet3_read_mac_addr(adapter, mac); + memcpy(netdev->dev_addr, mac, netdev->addr_len); + + netdev->netdev_ops = &vmxnet3_netdev_ops; + netdev->watchdog_timeo = 5 * HZ; + vmxnet3_set_ethtool_ops(netdev); + + INIT_WORK(&adapter->work, vmxnet3_reset_work); + + netif_napi_add(netdev, &adapter->napi, vmxnet3_poll, 64); + SET_NETDEV_DEV(netdev, &pdev->dev); + err = register_netdev(netdev); + + if (err) { + printk(KERN_ERR "Failed to register adapter %s\n", + pci_name(pdev)); + goto err_register; + } + + set_bit(VMXNET3_STATE_BIT_QUIESCED, &adapter->state); + atomic_inc(&devices_found); + return 0; + +err_register: + vmxnet3_free_intr_resources(adapter); +err_ver: + vmxnet3_free_pci_resources(adapter); +err_alloc_pci: + kfree(adapter->pm_conf); +err_alloc_pm: + pci_free_consistent(adapter->pdev, sizeof(struct Vmxnet3_TxQueueDesc) + + sizeof(struct Vmxnet3_RxQueueDesc), + adapter->tqd_start, adapter->queue_desc_pa); +err_alloc_queue_desc: + 
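+ /* free the driver-shared area allocated first */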
pci_free_consistent(adapter->pdev, sizeof(struct Vmxnet3_DriverShared), + adapter->shared, adapter->shared_pa); +err_alloc_shared: + pci_set_drvdata(pdev, NULL); + free_netdev(netdev); + return err; +} + + +static void __devexit +vmxnet3_remove_device(struct pci_dev *pdev) +{ + struct net_device *netdev = pci_get_drvdata(pdev); + struct vmxnet3_adapter *adapter = netdev_priv(netdev); + + flush_scheduled_work(); + + unregister_netdev(netdev); + + vmxnet3_free_intr_resources(adapter); + vmxnet3_free_pci_resources(adapter); + kfree(adapter->pm_conf); + pci_free_consistent(adapter->pdev, sizeof(struct Vmxnet3_TxQueueDesc) + + sizeof(struct Vmxnet3_RxQueueDesc), + adapter->tqd_start, adapter->queue_desc_pa); + pci_free_consistent(adapter->pdev, sizeof(struct Vmxnet3_DriverShared), + adapter->shared, adapter->shared_pa); + free_netdev(netdev); +} + + +#ifdef CONFIG_PM + +static int +vmxnet3_suspend(struct device *device) +{ + struct pci_dev *pdev = to_pci_dev(device); + struct net_device *netdev = pci_get_drvdata(pdev); + struct vmxnet3_adapter *adapter = netdev_priv(netdev); + struct Vmxnet3_PMConf *pmConf; + struct ethhdr *ehdr; + struct arphdr *ahdr; + u8 *arpreq; + struct in_device *in_dev; + struct in_ifaddr *ifa; + int i = 0; + + if (!netif_running(netdev)) + return 0; + + vmxnet3_disable_all_intrs(adapter); + vmxnet3_free_irqs(adapter); + vmxnet3_free_intr_resources(adapter); + + netif_device_detach(netdev); + netif_stop_queue(netdev); + + /* Create wake-up filters. */ + pmConf = adapter->pm_conf; + memset(pmConf, 0, sizeof(*pmConf)); + + if (adapter->wol & WAKE_UCAST) { + pmConf->filters[i].patternSize = ETH_ALEN; + pmConf->filters[i].maskSize = 1; + memcpy(pmConf->filters[i].pattern, netdev->dev_addr, ETH_ALEN); + pmConf->filters[i].mask[0] = 0x3F; /* LSB ETH_ALEN bits */ + + pmConf->wakeUpEvents |= VMXNET3_PM_WAKEUP_FILTER; + i++; + } + + if (adapter->wol & WAKE_ARP) { + in_dev = in_dev_get(netdev); + if (!in_dev) + goto skip_arp; + + ifa = (struct in_ifaddr *)in_dev->ifa_list; + if (!ifa) + goto skip_arp; + + pmConf->filters[i].patternSize = ETH_HLEN + /* Ethernet header*/ + sizeof(struct arphdr) + /* ARP header */ + 2 * ETH_ALEN + /* 2 Ethernet addresses*/ + 2 * sizeof(u32); /*2 IPv4 addresses */ + pmConf->filters[i].maskSize = + (pmConf->filters[i].patternSize - 1) / 8 + 1; + + /* ETH_P_ARP in Ethernet header. */ + ehdr = (struct ethhdr *)pmConf->filters[i].pattern; + ehdr->h_proto = htons(ETH_P_ARP); + + /* ARPOP_REQUEST in ARP header. */ + ahdr = (struct arphdr *)&pmConf->filters[i].pattern[ETH_HLEN]; + ahdr->ar_op = htons(ARPOP_REQUEST); + arpreq = (u8 *)(ahdr + 1); + + /* The Unicast IPv4 address in 'tip' field. */ + arpreq += 2 * ETH_ALEN + sizeof(u32); + *(u32 *)arpreq = ifa->ifa_address; + + /* The mask for the relevant bits. 
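+ * (covers the Ethernet proto, the ARP opcode and the target IP fields)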
*/ + pmConf->filters[i].mask[0] = 0x00; + pmConf->filters[i].mask[1] = 0x30; /* ETH_P_ARP */ + pmConf->filters[i].mask[2] = 0x30; /* ARPOP_REQUEST */ + pmConf->filters[i].mask[3] = 0x00; + pmConf->filters[i].mask[4] = 0xC0; /* IPv4 TIP */ + pmConf->filters[i].mask[5] = 0x03; /* IPv4 TIP */ + in_dev_put(in_dev); + + pmConf->wakeUpEvents |= VMXNET3_PM_WAKEUP_FILTER; + i++; + } + +skip_arp: + if (adapter->wol & WAKE_MAGIC) + pmConf->wakeUpEvents |= VMXNET3_PM_WAKEUP_MAGIC; + + pmConf->numFilters = i; + + adapter->shared->devRead.pmConfDesc.confVer = 1; + adapter->shared->devRead.pmConfDesc.confLen = sizeof(*pmConf); + adapter->shared->devRead.pmConfDesc.confPA = virt_to_phys(pmConf); + + VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, + VMXNET3_CMD_UPDATE_PMCFG); + + pci_save_state(pdev); + pci_enable_wake(pdev, pci_choose_state(pdev, PMSG_SUSPEND), + adapter->wol); + pci_disable_device(pdev); + pci_set_power_state(pdev, pci_choose_state(pdev, PMSG_SUSPEND)); + + return 0; +} + + +static int +vmxnet3_resume(struct device *device) +{ + int err; + struct pci_dev *pdev = to_pci_dev(device); + struct net_device *netdev = pci_get_drvdata(pdev); + struct vmxnet3_adapter *adapter = netdev_priv(netdev); + struct Vmxnet3_PMConf *pmConf; + + if (!netif_running(netdev)) + return 0; + + /* Destroy wake-up filters. */ + pmConf = adapter->pm_conf; + memset(pmConf, 0, sizeof(*pmConf)); + + adapter->shared->devRead.pmConfDesc.confVer = 1; + adapter->shared->devRead.pmConfDesc.confLen = sizeof(*pmConf); + adapter->shared->devRead.pmConfDesc.confPA = virt_to_phys(pmConf); + + netif_device_attach(netdev); + pci_set_power_state(pdev, PCI_D0); + pci_restore_state(pdev); + err = pci_enable_device_mem(pdev); + if (err != 0) + return err; + + pci_enable_wake(pdev, PCI_D0, 0); + + VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, + VMXNET3_CMD_UPDATE_PMCFG); + vmxnet3_alloc_intr_resources(adapter); + vmxnet3_request_irqs(adapter); + vmxnet3_enable_all_intrs(adapter); + + return 0; +} + +static struct dev_pm_ops vmxnet3_pm_ops = { + .suspend = vmxnet3_suspend, + .resume = vmxnet3_resume, +}; +#endif + +static struct pci_driver vmxnet3_driver = { + .name = vmxnet3_driver_name, + .id_table = vmxnet3_pciid_table, + .probe = vmxnet3_probe_device, + .remove = __devexit_p(vmxnet3_remove_device), +#ifdef CONFIG_PM + .driver.pm = &vmxnet3_pm_ops, +#endif +}; + + +static int __init +vmxnet3_init_module(void) +{ + printk(KERN_INFO "%s - version %s\n", VMXNET3_DRIVER_DESC, + VMXNET3_DRIVER_VERSION_REPORT); + return pci_register_driver(&vmxnet3_driver); +} + +module_init(vmxnet3_init_module); + + +static void +vmxnet3_exit_module(void) +{ + pci_unregister_driver(&vmxnet3_driver); +} + +module_exit(vmxnet3_exit_module); + +MODULE_AUTHOR("VMware, Inc."); +MODULE_DESCRIPTION(VMXNET3_DRIVER_DESC); +MODULE_LICENSE("GPL v2"); +MODULE_VERSION(VMXNET3_DRIVER_VERSION_STRING); diff --git a/drivers/net/vmxnet3/vmxnet3_ethtool.c b/drivers/net/vmxnet3/vmxnet3_ethtool.c new file mode 100644 index 00000000000..c2c15e4cafc --- /dev/null +++ b/drivers/net/vmxnet3/vmxnet3_ethtool.c @@ -0,0 +1,566 @@ +/* + * Linux driver for VMware's vmxnet3 ethernet NIC. + * + * Copyright (C) 2008-2009, VMware, Inc. All Rights Reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; version 2 of the License and no later version. 
+ * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or + * NON INFRINGEMENT. See the GNU General Public License for more + * details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * The full GNU General Public License is included in this distribution in + * the file called "COPYING". + * + * Maintained by: Shreyas Bhatewara <pv-drivers@vmware.com> + * + */ + + +#include "vmxnet3_int.h" + +struct vmxnet3_stat_desc { + char desc[ETH_GSTRING_LEN]; + int offset; +}; + + +static u32 +vmxnet3_get_rx_csum(struct net_device *netdev) +{ + struct vmxnet3_adapter *adapter = netdev_priv(netdev); + return adapter->rxcsum; +} + + +static int +vmxnet3_set_rx_csum(struct net_device *netdev, u32 val) +{ + struct vmxnet3_adapter *adapter = netdev_priv(netdev); + + if (adapter->rxcsum != val) { + adapter->rxcsum = val; + if (netif_running(netdev)) { + if (val) + adapter->shared->devRead.misc.uptFeatures |= + UPT1_F_RXCSUM; + else + adapter->shared->devRead.misc.uptFeatures &= + ~UPT1_F_RXCSUM; + + VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, + VMXNET3_CMD_UPDATE_FEATURE); + } + } + return 0; +} + + +/* per tq stats maintained by the device */ +static const struct vmxnet3_stat_desc +vmxnet3_tq_dev_stats[] = { + /* description, offset */ + { "TSO pkts tx", offsetof(struct UPT1_TxStats, TSOPktsTxOK) }, + { "TSO bytes tx", offsetof(struct UPT1_TxStats, TSOBytesTxOK) }, + { "ucast pkts tx", offsetof(struct UPT1_TxStats, ucastPktsTxOK) }, + { "ucast bytes tx", offsetof(struct UPT1_TxStats, ucastBytesTxOK) }, + { "mcast pkts tx", offsetof(struct UPT1_TxStats, mcastPktsTxOK) }, + { "mcast bytes tx", offsetof(struct UPT1_TxStats, mcastBytesTxOK) }, + { "bcast pkts tx", offsetof(struct UPT1_TxStats, bcastPktsTxOK) }, + { "bcast bytes tx", offsetof(struct UPT1_TxStats, bcastBytesTxOK) }, + { "pkts tx err", offsetof(struct UPT1_TxStats, pktsTxError) }, + { "pkts tx discard", offsetof(struct UPT1_TxStats, pktsTxDiscard) }, +}; + +/* per tq stats maintained by the driver */ +static const struct vmxnet3_stat_desc +vmxnet3_tq_driver_stats[] = { + /* description, offset */ + {"drv dropped tx total", offsetof(struct vmxnet3_tq_driver_stats, + drop_total) }, + { " too many frags", offsetof(struct vmxnet3_tq_driver_stats, + drop_too_many_frags) }, + { " giant hdr", offsetof(struct vmxnet3_tq_driver_stats, + drop_oversized_hdr) }, + { " hdr err", offsetof(struct vmxnet3_tq_driver_stats, + drop_hdr_inspect_err) }, + { " tso", offsetof(struct vmxnet3_tq_driver_stats, + drop_tso) }, + { "ring full", offsetof(struct vmxnet3_tq_driver_stats, + tx_ring_full) }, + { "pkts linearized", offsetof(struct vmxnet3_tq_driver_stats, + linearized) }, + { "hdr cloned", offsetof(struct vmxnet3_tq_driver_stats, + copy_skb_header) }, + { "giant hdr", offsetof(struct vmxnet3_tq_driver_stats, + oversized_hdr) }, +}; + +/* per rq stats maintained by the device */ +static const struct vmxnet3_stat_desc +vmxnet3_rq_dev_stats[] = { + { "LRO pkts rx", offsetof(struct UPT1_RxStats, LROPktsRxOK) }, + { "LRO byte rx", offsetof(struct UPT1_RxStats, LROBytesRxOK) }, + { "ucast pkts rx", offsetof(struct UPT1_RxStats, ucastPktsRxOK) }, + { "ucast bytes rx", offsetof(struct UPT1_RxStats, ucastBytesRxOK) }, + { "mcast pkts rx", 
offsetof(struct UPT1_RxStats, mcastPktsRxOK) }, + { "mcast bytes rx", offsetof(struct UPT1_RxStats, mcastBytesRxOK) }, + { "bcast pkts rx", offsetof(struct UPT1_RxStats, bcastPktsRxOK) }, + { "bcast bytes rx", offsetof(struct UPT1_RxStats, bcastBytesRxOK) }, + { "pkts rx out of buf", offsetof(struct UPT1_RxStats, pktsRxOutOfBuf) }, + { "pkts rx err", offsetof(struct UPT1_RxStats, pktsRxError) }, +}; + +/* per rq stats maintained by the driver */ +static const struct vmxnet3_stat_desc +vmxnet3_rq_driver_stats[] = { + /* description, offset */ + { "drv dropped rx total", offsetof(struct vmxnet3_rq_driver_stats, + drop_total) }, + { " err", offsetof(struct vmxnet3_rq_driver_stats, + drop_err) }, + { " fcs", offsetof(struct vmxnet3_rq_driver_stats, + drop_fcs) }, + { "rx buf alloc fail", offsetof(struct vmxnet3_rq_driver_stats, + rx_buf_alloc_failure) }, +}; + +/* global stats maintained by the driver */ +static const struct vmxnet3_stat_desc +vmxnet3_global_stats[] = { + /* description, offset */ + { "tx timeout count", offsetof(struct vmxnet3_adapter, + tx_timeout_count) } +}; + + +struct net_device_stats * +vmxnet3_get_stats(struct net_device *netdev) +{ + struct vmxnet3_adapter *adapter; + struct vmxnet3_tq_driver_stats *drvTxStats; + struct vmxnet3_rq_driver_stats *drvRxStats; + struct UPT1_TxStats *devTxStats; + struct UPT1_RxStats *devRxStats; + struct net_device_stats *net_stats = &netdev->stats; + + adapter = netdev_priv(netdev); + + /* Collect the dev stats into the shared area */ + VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, VMXNET3_CMD_GET_STATS); + + /* Assuming that we have a single queue device */ + devTxStats = &adapter->tqd_start->stats; + devRxStats = &adapter->rqd_start->stats; + + /* Get access to the driver stats per queue */ + drvTxStats = &adapter->tx_queue.stats; + drvRxStats = &adapter->rx_queue.stats; + + memset(net_stats, 0, sizeof(*net_stats)); + + net_stats->rx_packets = devRxStats->ucastPktsRxOK + + devRxStats->mcastPktsRxOK + + devRxStats->bcastPktsRxOK; + + net_stats->tx_packets = devTxStats->ucastPktsTxOK + + devTxStats->mcastPktsTxOK + + devTxStats->bcastPktsTxOK; + + net_stats->rx_bytes = devRxStats->ucastBytesRxOK + + devRxStats->mcastBytesRxOK + + devRxStats->bcastBytesRxOK; + + net_stats->tx_bytes = devTxStats->ucastBytesTxOK + + devTxStats->mcastBytesTxOK + + devTxStats->bcastBytesTxOK; + + net_stats->rx_errors = devRxStats->pktsRxError; + net_stats->tx_errors = devTxStats->pktsTxError; + net_stats->rx_dropped = drvRxStats->drop_total; + net_stats->tx_dropped = drvTxStats->drop_total; + net_stats->multicast = devRxStats->mcastPktsRxOK; + + return net_stats; +} + +static int +vmxnet3_get_sset_count(struct net_device *netdev, int sset) +{ + switch (sset) { + case ETH_SS_STATS: + return ARRAY_SIZE(vmxnet3_tq_dev_stats) + + ARRAY_SIZE(vmxnet3_tq_driver_stats) + + ARRAY_SIZE(vmxnet3_rq_dev_stats) + + ARRAY_SIZE(vmxnet3_rq_driver_stats) + + ARRAY_SIZE(vmxnet3_global_stats); + default: + return -EOPNOTSUPP; + } +} + + +static int +vmxnet3_get_regs_len(struct net_device *netdev) +{ + return 20 * sizeof(u32); +} + + +static void +vmxnet3_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *drvinfo) +{ + struct vmxnet3_adapter *adapter = netdev_priv(netdev); + + strlcpy(drvinfo->driver, vmxnet3_driver_name, sizeof(drvinfo->driver)); + drvinfo->driver[sizeof(drvinfo->driver) - 1] = '\0'; + + strlcpy(drvinfo->version, VMXNET3_DRIVER_VERSION_REPORT, + sizeof(drvinfo->version)); + drvinfo->version[sizeof(drvinfo->version) - 1] = '\0'; + +
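/* strlcpy() always NUL-terminates when given a non-zero buffer size, so the explicit terminators above are purely defensive. */ +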
strlcpy(drvinfo->fw_version, "N/A", sizeof(drvinfo->fw_version)); + drvinfo->fw_version[sizeof(drvinfo->fw_version) - 1] = '\0'; + + strlcpy(drvinfo->bus_info, pci_name(adapter->pdev), + ETHTOOL_BUSINFO_LEN); + drvinfo->n_stats = vmxnet3_get_sset_count(netdev, ETH_SS_STATS); + drvinfo->testinfo_len = 0; + drvinfo->eedump_len = 0; + drvinfo->regdump_len = vmxnet3_get_regs_len(netdev); +} + + +static void +vmxnet3_get_strings(struct net_device *netdev, u32 stringset, u8 *buf) +{ + if (stringset == ETH_SS_STATS) { + int i; + + for (i = 0; i < ARRAY_SIZE(vmxnet3_tq_dev_stats); i++) { + memcpy(buf, vmxnet3_tq_dev_stats[i].desc, + ETH_GSTRING_LEN); + buf += ETH_GSTRING_LEN; + } + for (i = 0; i < ARRAY_SIZE(vmxnet3_tq_driver_stats); i++) { + memcpy(buf, vmxnet3_tq_driver_stats[i].desc, + ETH_GSTRING_LEN); + buf += ETH_GSTRING_LEN; + } + for (i = 0; i < ARRAY_SIZE(vmxnet3_rq_dev_stats); i++) { + memcpy(buf, vmxnet3_rq_dev_stats[i].desc, + ETH_GSTRING_LEN); + buf += ETH_GSTRING_LEN; + } + for (i = 0; i < ARRAY_SIZE(vmxnet3_rq_driver_stats); i++) { + memcpy(buf, vmxnet3_rq_driver_stats[i].desc, + ETH_GSTRING_LEN); + buf += ETH_GSTRING_LEN; + } + for (i = 0; i < ARRAY_SIZE(vmxnet3_global_stats); i++) { + memcpy(buf, vmxnet3_global_stats[i].desc, + ETH_GSTRING_LEN); + buf += ETH_GSTRING_LEN; + } + } +} + +static u32 +vmxnet3_get_flags(struct net_device *netdev) +{ + return netdev->features; +} + +static int +vmxnet3_set_flags(struct net_device *netdev, u32 data) +{ + struct vmxnet3_adapter *adapter = netdev_priv(netdev); + u8 lro_requested = (data & ETH_FLAG_LRO) == 0 ? 0 : 1; + u8 lro_present = (netdev->features & NETIF_F_LRO) == 0 ? 0 : 1; + + if (lro_requested ^ lro_present) { + /* toggle the LRO feature */ + netdev->features ^= NETIF_F_LRO; + + /* update hardware LRO capability accordingly */ + if (lro_requested) + adapter->shared->devRead.misc.uptFeatures |= UPT1_F_LRO; + else + adapter->shared->devRead.misc.uptFeatures &= + ~UPT1_F_LRO; + VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, + VMXNET3_CMD_UPDATE_FEATURE); + } + return 0; +} + +static void +vmxnet3_get_ethtool_stats(struct net_device *netdev, + struct ethtool_stats *stats, u64 *buf) +{ + struct vmxnet3_adapter *adapter = netdev_priv(netdev); + u8 *base; + int i; + + VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, VMXNET3_CMD_GET_STATS); + + /* this does assume each counter is 64-bit wide */ + + base = (u8 *)&adapter->tqd_start->stats; + for (i = 0; i < ARRAY_SIZE(vmxnet3_tq_dev_stats); i++) + *buf++ = *(u64 *)(base + vmxnet3_tq_dev_stats[i].offset); + + base = (u8 *)&adapter->tx_queue.stats; + for (i = 0; i < ARRAY_SIZE(vmxnet3_tq_driver_stats); i++) + *buf++ = *(u64 *)(base + vmxnet3_tq_driver_stats[i].offset); + + base = (u8 *)&adapter->rqd_start->stats; + for (i = 0; i < ARRAY_SIZE(vmxnet3_rq_dev_stats); i++) + *buf++ = *(u64 *)(base + vmxnet3_rq_dev_stats[i].offset); + + base = (u8 *)&adapter->rx_queue.stats; + for (i = 0; i < ARRAY_SIZE(vmxnet3_rq_driver_stats); i++) + *buf++ = *(u64 *)(base + vmxnet3_rq_driver_stats[i].offset); + + base = (u8 *)adapter; + for (i = 0; i < ARRAY_SIZE(vmxnet3_global_stats); i++) + *buf++ = *(u64 *)(base + vmxnet3_global_stats[i].offset); +} + + +static void +vmxnet3_get_regs(struct net_device *netdev, struct ethtool_regs *regs, void *p) +{ + struct vmxnet3_adapter *adapter = netdev_priv(netdev); + u32 *buf = p; + + memset(p, 0, vmxnet3_get_regs_len(netdev)); + + regs->version = 1; + + /* Update vmxnet3_get_regs_len if we want to dump more registers */ + + /* make each ring use multiple of 16 bytes */
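+ /* 20 u32s in total, matching vmxnet3_get_regs_len(): four words each for the tx ring, tx completion ring, rx ring 0, rx ring 1 and the rx completion ring, zero-padded to 16-byte groups. */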
+ buf[0] = adapter->tx_queue.tx_ring.next2fill; + buf[1] = adapter->tx_queue.tx_ring.next2comp; + buf[2] = adapter->tx_queue.tx_ring.gen; + buf[3] = 0; + + buf[4] = adapter->tx_queue.comp_ring.next2proc; + buf[5] = adapter->tx_queue.comp_ring.gen; + buf[6] = adapter->tx_queue.stopped; + buf[7] = 0; + + buf[8] = adapter->rx_queue.rx_ring[0].next2fill; + buf[9] = adapter->rx_queue.rx_ring[0].next2comp; + buf[10] = adapter->rx_queue.rx_ring[0].gen; + buf[11] = 0; + + buf[12] = adapter->rx_queue.rx_ring[1].next2fill; + buf[13] = adapter->rx_queue.rx_ring[1].next2comp; + buf[14] = adapter->rx_queue.rx_ring[1].gen; + buf[15] = 0; + + buf[16] = adapter->rx_queue.comp_ring.next2proc; + buf[17] = adapter->rx_queue.comp_ring.gen; + buf[18] = 0; + buf[19] = 0; +} + + +static void +vmxnet3_get_wol(struct net_device *netdev, struct ethtool_wolinfo *wol) +{ + struct vmxnet3_adapter *adapter = netdev_priv(netdev); + + wol->supported = WAKE_UCAST | WAKE_ARP | WAKE_MAGIC; + wol->wolopts = adapter->wol; +} + + +static int +vmxnet3_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol) +{ + struct vmxnet3_adapter *adapter = netdev_priv(netdev); + + if (wol->wolopts & (WAKE_PHY | WAKE_MCAST | WAKE_BCAST | + WAKE_MAGICSECURE)) { + return -EOPNOTSUPP; + } + + adapter->wol = wol->wolopts; + + device_set_wakeup_enable(&adapter->pdev->dev, adapter->wol); + + return 0; +} + + +static int +vmxnet3_get_settings(struct net_device *netdev, struct ethtool_cmd *ecmd) +{ + struct vmxnet3_adapter *adapter = netdev_priv(netdev); + + ecmd->supported = SUPPORTED_10000baseT_Full | SUPPORTED_1000baseT_Full | + SUPPORTED_TP; + ecmd->advertising = ADVERTISED_TP; + ecmd->port = PORT_TP; + ecmd->transceiver = XCVR_INTERNAL; + + if (adapter->link_speed) { + ecmd->speed = adapter->link_speed; + ecmd->duplex = DUPLEX_FULL; + } else { + ecmd->speed = -1; + ecmd->duplex = -1; + } + return 0; +} + + +static void +vmxnet3_get_ringparam(struct net_device *netdev, + struct ethtool_ringparam *param) +{ + struct vmxnet3_adapter *adapter = netdev_priv(netdev); + + param->rx_max_pending = VMXNET3_RX_RING_MAX_SIZE; + param->tx_max_pending = VMXNET3_TX_RING_MAX_SIZE; + param->rx_mini_max_pending = 0; + param->rx_jumbo_max_pending = 0; + + param->rx_pending = adapter->rx_queue.rx_ring[0].size; + param->tx_pending = adapter->tx_queue.tx_ring.size; + param->rx_mini_pending = 0; + param->rx_jumbo_pending = 0; +} + + +static int +vmxnet3_set_ringparam(struct net_device *netdev, + struct ethtool_ringparam *param) +{ + struct vmxnet3_adapter *adapter = netdev_priv(netdev); + u32 new_tx_ring_size, new_rx_ring_size; + u32 sz; + int err = 0; + + if (param->tx_pending == 0 || param->tx_pending > + VMXNET3_TX_RING_MAX_SIZE) + return -EINVAL; + + if (param->rx_pending == 0 || param->rx_pending > + VMXNET3_RX_RING_MAX_SIZE) + return -EINVAL; + + + /* round it up to a multiple of VMXNET3_RING_SIZE_ALIGN */ + new_tx_ring_size = (param->tx_pending + VMXNET3_RING_SIZE_MASK) & + ~VMXNET3_RING_SIZE_MASK; + new_tx_ring_size = min_t(u32, new_tx_ring_size, + VMXNET3_TX_RING_MAX_SIZE); + if (new_tx_ring_size > VMXNET3_TX_RING_MAX_SIZE || (new_tx_ring_size % + VMXNET3_RING_SIZE_ALIGN) != 0) + return -EINVAL; + + /* ring0 has to be a multiple of + * rx_buf_per_pkt * VMXNET3_RING_SIZE_ALIGN + */ + sz = adapter->rx_buf_per_pkt * VMXNET3_RING_SIZE_ALIGN; + new_rx_ring_size = (param->rx_pending + sz - 1) / sz * sz; + new_rx_ring_size = min_t(u32, new_rx_ring_size, + VMXNET3_RX_RING_MAX_SIZE / sz * sz); + if (new_rx_ring_size > VMXNET3_RX_RING_MAX_SIZE || 
(new_rx_ring_size % + sz) != 0) + return -EINVAL; + + if (new_tx_ring_size == adapter->tx_queue.tx_ring.size && + new_rx_ring_size == adapter->rx_queue.rx_ring[0].size) { + return 0; + } + + /* + * Reset_work may be in the middle of resetting the device, wait for its + * completion. + */ + while (test_and_set_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state)) + msleep(1); + + if (netif_running(netdev)) { + vmxnet3_quiesce_dev(adapter); + vmxnet3_reset_dev(adapter); + + /* recreate the rx queue and the tx queue based on the + * new sizes */ + vmxnet3_tq_destroy(&adapter->tx_queue, adapter); + vmxnet3_rq_destroy(&adapter->rx_queue, adapter); + + err = vmxnet3_create_queues(adapter, new_tx_ring_size, + new_rx_ring_size, VMXNET3_DEF_RX_RING_SIZE); + if (err) { + /* failed, most likely because of OOM, try default + * size */ + printk(KERN_ERR "%s: failed to apply new sizes, try the" + " default ones\n", netdev->name); + err = vmxnet3_create_queues(adapter, + VMXNET3_DEF_TX_RING_SIZE, + VMXNET3_DEF_RX_RING_SIZE, + VMXNET3_DEF_RX_RING_SIZE); + if (err) { + printk(KERN_ERR "%s: failed to create queues " + "with default sizes. Closing it\n", + netdev->name); + goto out; + } + } + + err = vmxnet3_activate_dev(adapter); + if (err) + printk(KERN_ERR "%s: failed to re-activate, error %d." + " Closing it\n", netdev->name, err); + } + +out: + clear_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state); + if (err) + vmxnet3_force_close(adapter); + + return err; +} + + +static struct ethtool_ops vmxnet3_ethtool_ops = { + .get_settings = vmxnet3_get_settings, + .get_drvinfo = vmxnet3_get_drvinfo, + .get_regs_len = vmxnet3_get_regs_len, + .get_regs = vmxnet3_get_regs, + .get_wol = vmxnet3_get_wol, + .set_wol = vmxnet3_set_wol, + .get_link = ethtool_op_get_link, + .get_rx_csum = vmxnet3_get_rx_csum, + .set_rx_csum = vmxnet3_set_rx_csum, + .get_tx_csum = ethtool_op_get_tx_csum, + .set_tx_csum = ethtool_op_set_tx_hw_csum, + .get_sg = ethtool_op_get_sg, + .set_sg = ethtool_op_set_sg, + .get_tso = ethtool_op_get_tso, + .set_tso = ethtool_op_set_tso, + .get_strings = vmxnet3_get_strings, + .get_flags = vmxnet3_get_flags, + .set_flags = vmxnet3_set_flags, + .get_sset_count = vmxnet3_get_sset_count, + .get_ethtool_stats = vmxnet3_get_ethtool_stats, + .get_ringparam = vmxnet3_get_ringparam, + .set_ringparam = vmxnet3_set_ringparam, +}; + +void vmxnet3_set_ethtool_ops(struct net_device *netdev) +{ + SET_ETHTOOL_OPS(netdev, &vmxnet3_ethtool_ops); +} diff --git a/drivers/net/vmxnet3/vmxnet3_int.h b/drivers/net/vmxnet3/vmxnet3_int.h new file mode 100644 index 00000000000..6bb91576e99 --- /dev/null +++ b/drivers/net/vmxnet3/vmxnet3_int.h @@ -0,0 +1,389 @@ +/* + * Linux driver for VMware's vmxnet3 ethernet NIC. + * + * Copyright (C) 2008-2009, VMware, Inc. All Rights Reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; version 2 of the License and no later version. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or + * NON INFRINGEMENT. See the GNU General Public License for more + * details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 
+ * + * The full GNU General Public License is included in this distribution in + * the file called "COPYING". + * + * Maintained by: Shreyas Bhatewara <pv-drivers@vmware.com> + * + */ + +#ifndef _VMXNET3_INT_H +#define _VMXNET3_INT_H + +#include <linux/types.h> +#include <linux/ethtool.h> +#include <linux/delay.h> +#include <linux/netdevice.h> +#include <linux/pci.h> +#include <linux/compiler.h> +#include <linux/module.h> +#include <linux/moduleparam.h> +#include <linux/slab.h> +#include <linux/spinlock.h> +#include <linux/ioport.h> +#include <linux/highmem.h> +#include <linux/init.h> +#include <linux/timer.h> +#include <linux/skbuff.h> +#include <linux/interrupt.h> +#include <linux/workqueue.h> +#include <linux/uaccess.h> +#include <asm/dma.h> +#include <asm/page.h> + +#include <linux/tcp.h> +#include <linux/udp.h> +#include <linux/ip.h> +#include <linux/ipv6.h> +#include <linux/in.h> +#include <linux/etherdevice.h> +#include <asm/checksum.h> +#include <linux/if_vlan.h> +#include <linux/if_arp.h> +#include <linux/inetdevice.h> +#include <linux/dst.h> + +#include "vmxnet3_defs.h" + +#ifdef DEBUG +# define VMXNET3_DRIVER_VERSION_REPORT VMXNET3_DRIVER_VERSION_STRING"-NAPI(debug)" +#else +# define VMXNET3_DRIVER_VERSION_REPORT VMXNET3_DRIVER_VERSION_STRING"-NAPI" +#endif + + +/* + * Version numbers + */ +#define VMXNET3_DRIVER_VERSION_STRING "1.0.5.0-k" + +/* a 32-bit int; each byte encodes a version number in VMXNET3_DRIVER_VERSION */ +#define VMXNET3_DRIVER_VERSION_NUM 0x01000500 + + +/* + * Capabilities + */ + +enum { + VMNET_CAP_SG = 0x0001, /* Can do scatter-gather transmits. */ + VMNET_CAP_IP4_CSUM = 0x0002, /* Can checksum only TCP/UDP over + * IPv4 */ + VMNET_CAP_HW_CSUM = 0x0004, /* Can checksum all packets. */ + VMNET_CAP_HIGH_DMA = 0x0008, /* Can DMA to high memory. */ + VMNET_CAP_TOE = 0x0010, /* Supports TCP/IP offload. */ + VMNET_CAP_TSO = 0x0020, /* Supports TCP Segmentation + * offload */ + VMNET_CAP_SW_TSO = 0x0040, /* Supports SW TCP Segmentation */ + VMNET_CAP_VMXNET_APROM = 0x0080, /* Vmxnet APROM support */ + VMNET_CAP_HW_TX_VLAN = 0x0100, /* Can we do VLAN tagging in HW */ + VMNET_CAP_HW_RX_VLAN = 0x0200, /* Can we do VLAN untagging in HW */ + VMNET_CAP_SW_VLAN = 0x0400, /* VLAN tagging/untagging in SW */ + VMNET_CAP_WAKE_PCKT_RCV = 0x0800, /* Can wake on network packet recv? */ + VMNET_CAP_ENABLE_INT_INLINE = 0x1000, /* Enable Interrupt Inline */ + VMNET_CAP_ENABLE_HEADER_COPY = 0x2000, /* copy header for vmkernel */ + VMNET_CAP_TX_CHAIN = 0x4000, /* Guest can use multiple tx entries + * for a pkt */ + VMNET_CAP_RX_CHAIN = 0x8000, /* pkt can span multiple rx entries */ + VMNET_CAP_LPD = 0x10000, /* large pkt delivery */ + VMNET_CAP_BPF = 0x20000, /* BPF Support in VMXNET Virtual HW */ + VMNET_CAP_SG_SPAN_PAGES = 0x40000, /* Scatter-gather can span multiple */ + /* pages transmits */ + VMNET_CAP_IP6_CSUM = 0x80000, /* Can do IPv6 csum offload. */ + VMNET_CAP_TSO6 = 0x100000, /* TSO seg. offload for IPv6 pkts. */ + VMNET_CAP_TSO256k = 0x200000, /* Can do TSO seg offload for */ + /* pkts up to 256kB. */ + VMNET_CAP_UPT = 0x400000 /* Support UPT */ +}; + +/* + * PCI vendor and device IDs.
+ */ +#define PCI_VENDOR_ID_VMWARE 0x15AD +#define PCI_DEVICE_ID_VMWARE_VMXNET3 0x07B0 +#define MAX_ETHERNET_CARDS 10 +#define MAX_PCI_PASSTHRU_DEVICE 6 + +struct vmxnet3_cmd_ring { + union Vmxnet3_GenericDesc *base; + u32 size; + u32 next2fill; + u32 next2comp; + u8 gen; + dma_addr_t basePA; +}; + +static inline void +vmxnet3_cmd_ring_adv_next2fill(struct vmxnet3_cmd_ring *ring) +{ + ring->next2fill++; + if (unlikely(ring->next2fill == ring->size)) { + ring->next2fill = 0; + VMXNET3_FLIP_RING_GEN(ring->gen); + } +} + +static inline void +vmxnet3_cmd_ring_adv_next2comp(struct vmxnet3_cmd_ring *ring) +{ + VMXNET3_INC_RING_IDX_ONLY(ring->next2comp, ring->size); +} + +static inline int +vmxnet3_cmd_ring_desc_avail(struct vmxnet3_cmd_ring *ring) +{ + return (ring->next2comp > ring->next2fill ? 0 : ring->size) + + ring->next2comp - ring->next2fill - 1; +} + +struct vmxnet3_comp_ring { + union Vmxnet3_GenericDesc *base; + u32 size; + u32 next2proc; + u8 gen; + u8 intr_idx; + dma_addr_t basePA; +}; + +static inline void +vmxnet3_comp_ring_adv_next2proc(struct vmxnet3_comp_ring *ring) +{ + ring->next2proc++; + if (unlikely(ring->next2proc == ring->size)) { + ring->next2proc = 0; + VMXNET3_FLIP_RING_GEN(ring->gen); + } +} + +struct vmxnet3_tx_data_ring { + struct Vmxnet3_TxDataDesc *base; + u32 size; + dma_addr_t basePA; +}; + +enum vmxnet3_buf_map_type { + VMXNET3_MAP_INVALID = 0, + VMXNET3_MAP_NONE, + VMXNET3_MAP_SINGLE, + VMXNET3_MAP_PAGE, +}; + +struct vmxnet3_tx_buf_info { + u32 map_type; + u16 len; + u16 sop_idx; + dma_addr_t dma_addr; + struct sk_buff *skb; +}; + +struct vmxnet3_tq_driver_stats { + u64 drop_total; /* # of pkts dropped by the driver, the + * counters below track droppings due to + * different reasons + */ + u64 drop_too_many_frags; + u64 drop_oversized_hdr; + u64 drop_hdr_inspect_err; + u64 drop_tso; + + u64 tx_ring_full; + u64 linearized; /* # of pkts linearized */ + u64 copy_skb_header; /* # of times we have to copy skb header */ + u64 oversized_hdr; +}; + +struct vmxnet3_tx_ctx { + bool ipv4; + u16 mss; + u32 eth_ip_hdr_size; /* only valid for pkts requesting tso or csum + * offloading + */ + u32 l4_hdr_size; /* only valid if mss != 0 */ + u32 copy_size; /* # of bytes copied into the data ring */ + union Vmxnet3_GenericDesc *sop_txd; + union Vmxnet3_GenericDesc *eop_txd; +}; + +struct vmxnet3_tx_queue { + spinlock_t tx_lock; + struct vmxnet3_cmd_ring tx_ring; + struct vmxnet3_tx_buf_info *buf_info; + struct vmxnet3_tx_data_ring data_ring; + struct vmxnet3_comp_ring comp_ring; + struct Vmxnet3_TxQueueCtrl *shared; + struct vmxnet3_tq_driver_stats stats; + bool stopped; + int num_stop; /* # of times the queue is + * stopped */ +} __attribute__((__aligned__(SMP_CACHE_BYTES))); + +enum vmxnet3_rx_buf_type { + VMXNET3_RX_BUF_NONE = 0, + VMXNET3_RX_BUF_SKB = 1, + VMXNET3_RX_BUF_PAGE = 2 +}; + +struct vmxnet3_rx_buf_info { + enum vmxnet3_rx_buf_type buf_type; + u16 len; + union { + struct sk_buff *skb; + struct page *page; + }; + dma_addr_t dma_addr; +}; + +struct vmxnet3_rx_ctx { + struct sk_buff *skb; + u32 sop_idx; +}; + +struct vmxnet3_rq_driver_stats { + u64 drop_total; + u64 drop_err; + u64 drop_fcs; + u64 rx_buf_alloc_failure; +}; + +struct vmxnet3_rx_queue { + struct vmxnet3_cmd_ring rx_ring[2]; + struct vmxnet3_comp_ring comp_ring; + struct vmxnet3_rx_ctx rx_ctx; + u32 qid; /* rqID in RCD for buffer from 1st ring */ + u32 qid2; /* rqID in RCD for buffer from 2nd ring */ + u32 uncommitted[2]; /* # of buffers allocated since last RXPROD + * update */ + struct 
vmxnet3_rx_buf_info *buf_info[2]; + struct Vmxnet3_RxQueueCtrl *shared; + struct vmxnet3_rq_driver_stats stats; +} __attribute__((__aligned__(SMP_CACHE_BYTES))); + +#define VMXNET3_LINUX_MAX_MSIX_VECT 1 + +struct vmxnet3_intr { + enum vmxnet3_intr_mask_mode mask_mode; + enum vmxnet3_intr_type type; /* MSI-X, MSI, or INTx? */ + u8 num_intrs; /* # of intr vectors */ + u8 event_intr_idx; /* idx of the intr vector for event */ + u8 mod_levels[VMXNET3_LINUX_MAX_MSIX_VECT]; /* moderation level */ +#ifdef CONFIG_PCI_MSI + struct msix_entry msix_entries[VMXNET3_LINUX_MAX_MSIX_VECT]; +#endif +}; + +#define VMXNET3_STATE_BIT_RESETTING 0 +#define VMXNET3_STATE_BIT_QUIESCED 1 +struct vmxnet3_adapter { + struct vmxnet3_tx_queue tx_queue; + struct vmxnet3_rx_queue rx_queue; + struct napi_struct napi; + struct vlan_group *vlan_grp; + + struct vmxnet3_intr intr; + + struct Vmxnet3_DriverShared *shared; + struct Vmxnet3_PMConf *pm_conf; + struct Vmxnet3_TxQueueDesc *tqd_start; /* first tx queue desc */ + struct Vmxnet3_RxQueueDesc *rqd_start; /* first rx queue desc */ + struct net_device *netdev; + struct pci_dev *pdev; + + u8 *hw_addr0; /* for BAR 0 */ + u8 *hw_addr1; /* for BAR 1 */ + + /* feature control */ + bool rxcsum; + bool lro; + bool jumbo_frame; + + /* rx buffer related */ + unsigned skb_buf_size; + int rx_buf_per_pkt; /* only apply to the 1st ring */ + dma_addr_t shared_pa; + dma_addr_t queue_desc_pa; + + /* Wake-on-LAN */ + u32 wol; + + /* Link speed */ + u32 link_speed; /* in mbps */ + + u64 tx_timeout_count; + struct work_struct work; + + unsigned long state; /* VMXNET3_STATE_BIT_xxx */ + + int dev_number; +}; + +#define VMXNET3_WRITE_BAR0_REG(adapter, reg, val) \ + writel((val), (adapter)->hw_addr0 + (reg)) +#define VMXNET3_READ_BAR0_REG(adapter, reg) \ + readl((adapter)->hw_addr0 + (reg)) + +#define VMXNET3_WRITE_BAR1_REG(adapter, reg, val) \ + writel((val), (adapter)->hw_addr1 + (reg)) +#define VMXNET3_READ_BAR1_REG(adapter, reg) \ + readl((adapter)->hw_addr1 + (reg)) + +#define VMXNET3_WAKE_QUEUE_THRESHOLD(tq) (5) +#define VMXNET3_RX_ALLOC_THRESHOLD(rq, ring_idx, adapter) \ + ((rq)->rx_ring[ring_idx].size >> 3) + +#define VMXNET3_GET_ADDR_LO(dma) ((u32)(dma)) +#define VMXNET3_GET_ADDR_HI(dma) ((u32)(((u64)(dma)) >> 32)) + +/* must be a multiple of VMXNET3_RING_SIZE_ALIGN */ +#define VMXNET3_DEF_TX_RING_SIZE 512 +#define VMXNET3_DEF_RX_RING_SIZE 256 + +#define VMXNET3_MAX_ETH_HDR_SIZE 22 +#define VMXNET3_MAX_SKB_BUF_SIZE (3*1024) + +int +vmxnet3_quiesce_dev(struct vmxnet3_adapter *adapter); + +int +vmxnet3_activate_dev(struct vmxnet3_adapter *adapter); + +void +vmxnet3_force_close(struct vmxnet3_adapter *adapter); + +void +vmxnet3_reset_dev(struct vmxnet3_adapter *adapter); + +void +vmxnet3_tq_destroy(struct vmxnet3_tx_queue *tq, + struct vmxnet3_adapter *adapter); + +void +vmxnet3_rq_destroy(struct vmxnet3_rx_queue *rq, + struct vmxnet3_adapter *adapter); + +int +vmxnet3_create_queues(struct vmxnet3_adapter *adapter, + u32 tx_ring_size, u32 rx_ring_size, u32 rx_ring2_size); + +extern void vmxnet3_set_ethtool_ops(struct net_device *netdev); +extern struct net_device_stats *vmxnet3_get_stats(struct net_device *netdev); + +extern char vmxnet3_driver_name[]; +#endif diff --git a/drivers/net/wan/hdlc_cisco.c b/drivers/net/wan/hdlc_cisco.c index cf5fd17ad70..f1bff98acd1 100644 --- a/drivers/net/wan/hdlc_cisco.c +++ b/drivers/net/wan/hdlc_cisco.c @@ -58,8 +58,7 @@ struct cisco_state { spinlock_t lock; unsigned long last_poll; int up; - int request_sent; - u32 txseq; /* TX sequence number */ + 
u32 txseq; /* TX sequence number, 0 = none */ u32 rxseq; /* RX sequence number */ }; @@ -163,6 +162,7 @@ static int cisco_rx(struct sk_buff *skb) struct cisco_packet *cisco_data; struct in_device *in_dev; __be32 addr, mask; + u32 ack; if (skb->len < sizeof(struct hdlc_header)) goto rx_error; @@ -223,8 +223,10 @@ static int cisco_rx(struct sk_buff *skb) case CISCO_KEEPALIVE_REQ: spin_lock(&st->lock); st->rxseq = ntohl(cisco_data->par1); - if (st->request_sent && - ntohl(cisco_data->par2) == st->txseq) { + ack = ntohl(cisco_data->par2); + if (ack && (ack == st->txseq || + /* our current REQ may be in transit */ + ack == st->txseq - 1)) { st->last_poll = jiffies; if (!st->up) { u32 sec, min, hrs, days; @@ -275,7 +277,6 @@ static void cisco_timer(unsigned long arg) cisco_keepalive_send(dev, CISCO_KEEPALIVE_REQ, htonl(++st->txseq), htonl(st->rxseq)); - st->request_sent = 1; spin_unlock(&st->lock); st->timer.expires = jiffies + st->settings.interval * HZ; @@ -293,9 +294,7 @@ static void cisco_start(struct net_device *dev) unsigned long flags; spin_lock_irqsave(&st->lock, flags); - st->up = 0; - st->request_sent = 0; - st->txseq = st->rxseq = 0; + st->up = st->txseq = st->rxseq = 0; spin_unlock_irqrestore(&st->lock, flags); init_timer(&st->timer); @@ -317,8 +316,7 @@ static void cisco_stop(struct net_device *dev) spin_lock_irqsave(&st->lock, flags); netif_dormant_on(dev); - st->up = 0; - st->request_sent = 0; + st->up = st->txseq = 0; spin_unlock_irqrestore(&st->lock, flags); } diff --git a/drivers/net/wireless/adm8211.h b/drivers/net/wireless/adm8211.h index 4f6ab132218..b07e4d3a6b4 100644 --- a/drivers/net/wireless/adm8211.h +++ b/drivers/net/wireless/adm8211.h @@ -266,7 +266,7 @@ do { \ #define ADM8211_SYNCTL_CS1 (1 << 28) #define ADM8211_SYNCTL_CAL (1 << 27) #define ADM8211_SYNCTL_SELCAL (1 << 26) -#define ADM8211_SYNCTL_RFtype ((1 << 24) || (1 << 23) || (1 << 22)) +#define ADM8211_SYNCTL_RFtype ((1 << 24) | (1 << 23) | (1 << 22)) #define ADM8211_SYNCTL_RFMD (1 << 22) #define ADM8211_SYNCTL_GENERAL (0x7 << 22) /* SYNCTL 21:0 Data (Si4126: 18-bit data, 4-bit address) */ diff --git a/drivers/net/wireless/b43/b43.h b/drivers/net/wireless/b43/b43.h index fa1549a03c7..660716214d4 100644 --- a/drivers/net/wireless/b43/b43.h +++ b/drivers/net/wireless/b43/b43.h @@ -607,82 +607,7 @@ struct b43_qos_params { struct ieee80211_tx_queue_params p; }; -struct b43_wldev; - -/* Data structure for the WLAN parts (802.11 cores) of the b43 chip. */ -struct b43_wl { - /* Pointer to the active wireless device on this chip */ - struct b43_wldev *current_dev; - /* Pointer to the ieee80211 hardware data structure */ - struct ieee80211_hw *hw; - - /* Global driver mutex. Every operation must run with this mutex locked. */ - struct mutex mutex; - /* Hard-IRQ spinlock. This lock protects things used in the hard-IRQ - * handler, only. This basically is just the IRQ mask register. */ - spinlock_t hardirq_lock; - - /* The number of queues that were registered with the mac80211 subsystem - * initially. This is a backup copy of hw->queues in case hw->queues has - * to be dynamically lowered at runtime (Firmware does not support QoS). - * hw->queues has to be restored to the original value before unregistering - * from the mac80211 subsystem. */ - u16 mac80211_initially_registered_queues; - - /* We can only have one operating interface (802.11 core) - * at a time. General information about this interface follows. - */ - - struct ieee80211_vif *vif; - /* The MAC address of the operating interface. 
*/ - u8 mac_addr[ETH_ALEN]; - /* Current BSSID */ - u8 bssid[ETH_ALEN]; - /* Interface type. (NL80211_IFTYPE_XXX) */ - int if_type; - /* Is the card operating in AP, STA or IBSS mode? */ - bool operating; - /* filter flags */ - unsigned int filter_flags; - /* Stats about the wireless interface */ - struct ieee80211_low_level_stats ieee_stats; - -#ifdef CONFIG_B43_HWRNG - struct hwrng rng; - bool rng_initialized; - char rng_name[30 + 1]; -#endif /* CONFIG_B43_HWRNG */ - - /* List of all wireless devices on this chip */ - struct list_head devlist; - u8 nr_devs; - - bool radiotap_enabled; - bool radio_enabled; - - /* The beacon we are currently using (AP or IBSS mode). */ - struct sk_buff *current_beacon; - bool beacon0_uploaded; - bool beacon1_uploaded; - bool beacon_templates_virgin; /* Never wrote the templates? */ - struct work_struct beacon_update_trigger; - - /* The current QOS parameters for the 4 queues. */ - struct b43_qos_params qos_params[4]; - - /* Work for adjustment of the transmission power. - * This is scheduled when we determine that the actual TX output - * power doesn't match what we want. */ - struct work_struct txpower_adjust_work; - - /* Packet transmit work */ - struct work_struct tx_work; - /* Queue of packets to be transmitted. */ - struct sk_buff_head tx_queue; - - /* The device LEDs. */ - struct b43_leds leds; -}; +struct b43_wl; /* The type of the firmware file. */ enum b43_firmware_file_type { @@ -824,6 +749,97 @@ struct b43_wldev { #endif }; +/* + * Include goes here to avoid a dependency problem. + * A better fix would be to integrate xmit.h into b43.h. + */ +#include "xmit.h" + +/* Data structure for the WLAN parts (802.11 cores) of the b43 chip. */ +struct b43_wl { + /* Pointer to the active wireless device on this chip */ + struct b43_wldev *current_dev; + /* Pointer to the ieee80211 hardware data structure */ + struct ieee80211_hw *hw; + + /* Global driver mutex. Every operation must run with this mutex locked. */ + struct mutex mutex; + /* Hard-IRQ spinlock. This lock protects things used in the hard-IRQ + * handler, only. This basically is just the IRQ mask register. */ + spinlock_t hardirq_lock; + + /* The number of queues that were registered with the mac80211 subsystem + * initially. This is a backup copy of hw->queues in case hw->queues has + * to be dynamically lowered at runtime (Firmware does not support QoS). + * hw->queues has to be restored to the original value before unregistering + * from the mac80211 subsystem. */ + u16 mac80211_initially_registered_queues; + + /* We can only have one operating interface (802.11 core) + * at a time. General information about this interface follows. + */ + + struct ieee80211_vif *vif; + /* The MAC address of the operating interface. */ + u8 mac_addr[ETH_ALEN]; + /* Current BSSID */ + u8 bssid[ETH_ALEN]; + /* Interface type. (NL80211_IFTYPE_XXX) */ + int if_type; + /* Is the card operating in AP, STA or IBSS mode? */ + bool operating; + /* filter flags */ + unsigned int filter_flags; + /* Stats about the wireless interface */ + struct ieee80211_low_level_stats ieee_stats; + +#ifdef CONFIG_B43_HWRNG + struct hwrng rng; + bool rng_initialized; + char rng_name[30 + 1]; +#endif /* CONFIG_B43_HWRNG */ + + /* List of all wireless devices on this chip */ + struct list_head devlist; + u8 nr_devs; + + bool radiotap_enabled; + bool radio_enabled; + + /* The beacon we are currently using (AP or IBSS mode). 
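+ * The beacon0_uploaded / beacon1_uploaded flags below record which of the two hardware beacon template slots currently hold a copy of it.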
*/ + struct sk_buff *current_beacon; + bool beacon0_uploaded; + bool beacon1_uploaded; + bool beacon_templates_virgin; /* Never wrote the templates? */ + struct work_struct beacon_update_trigger; + + /* The current QOS parameters for the 4 queues. */ + struct b43_qos_params qos_params[4]; + + /* Work for adjustment of the transmission power. + * This is scheduled when we determine that the actual TX output + * power doesn't match what we want. */ + struct work_struct txpower_adjust_work; + + /* Packet transmit work */ + struct work_struct tx_work; + /* Queue of packets to be transmitted. */ + struct sk_buff_head tx_queue; + + /* The device LEDs. */ + struct b43_leds leds; + +#ifdef CONFIG_B43_PIO + /* + * RX/TX header/tail buffers used by the frame transmit functions. + */ + struct b43_rxhdr_fw4 rxhdr; + struct b43_txhdr txhdr; + u8 rx_tail[4]; + u8 tx_tail[4]; +#endif /* CONFIG_B43_PIO */ +}; + static inline struct b43_wl *hw_to_b43_wl(struct ieee80211_hw *hw) { return hw->priv; diff --git a/drivers/net/wireless/b43/leds.c b/drivers/net/wireless/b43/leds.c index fbe3d4f62ce..1e8dba48800 100644 --- a/drivers/net/wireless/b43/leds.c +++ b/drivers/net/wireless/b43/leds.c @@ -348,9 +348,9 @@ void b43_leds_register(struct b43_wldev *dev) } } -void b43_leds_unregister(struct b43_wldev *dev) +void b43_leds_unregister(struct b43_wl *wl) { - struct b43_leds *leds = &dev->wl->leds; + struct b43_leds *leds = &wl->leds; b43_unregister_led(&leds->led_tx); b43_unregister_led(&leds->led_rx); diff --git a/drivers/net/wireless/b43/leds.h b/drivers/net/wireless/b43/leds.h index 9592e4c5a5f..4c56187810f 100644 --- a/drivers/net/wireless/b43/leds.h +++ b/drivers/net/wireless/b43/leds.h @@ -60,7 +60,7 @@ enum b43_led_behaviour { }; void b43_leds_register(struct b43_wldev *dev); -void b43_leds_unregister(struct b43_wldev *dev); +void b43_leds_unregister(struct b43_wl *wl); void b43_leds_init(struct b43_wldev *dev); void b43_leds_exit(struct b43_wldev *dev); void b43_leds_stop(struct b43_wldev *dev); @@ -76,7 +76,7 @@ struct b43_leds { static inline void b43_leds_register(struct b43_wldev *dev) { } -static inline void b43_leds_unregister(struct b43_wldev *dev) +static inline void b43_leds_unregister(struct b43_wl *wl) { } static inline void b43_leds_init(struct b43_wldev *dev) diff --git a/drivers/net/wireless/b43/main.c b/drivers/net/wireless/b43/main.c index 9b907a36bb8..df6b26a0c05 100644 --- a/drivers/net/wireless/b43/main.c +++ b/drivers/net/wireless/b43/main.c @@ -3874,6 +3874,7 @@ static struct b43_wldev * b43_wireless_core_stop(struct b43_wldev *dev) { struct b43_wl *wl = dev->wl; struct b43_wldev *orig_dev; + u32 mask; redo: if (!dev || b43_status(dev) < B43_STAT_STARTED) @@ -3920,7 +3921,8 @@ redo: goto redo; return dev; } - B43_WARN_ON(b43_read32(dev, B43_MMIO_GEN_IRQ_MASK)); + mask = b43_read32(dev, B43_MMIO_GEN_IRQ_MASK); + B43_WARN_ON(mask != 0xFFFFFFFF && mask); /* Drain the TX queue */ while (skb_queue_len(&wl->tx_queue)) @@ -4499,6 +4501,7 @@ static void b43_op_stop(struct ieee80211_hw *hw) cancel_work_sync(&(wl->beacon_update_trigger)); + wiphy_rfkill_stop_polling(hw->wiphy); mutex_lock(&wl->mutex); if (b43_status(dev) >= B43_STAT_STARTED) { dev = b43_wireless_core_stop(dev); @@ -4997,7 +5000,7 @@ static void b43_remove(struct ssb_device *dev) if (list_empty(&wl->devlist)) { b43_rng_exit(wl); - b43_leds_unregister(wldev); + b43_leds_unregister(wl); /* Last core on the chip unregistered. * We can destroy common struct b43_wl. 
*/ diff --git a/drivers/net/wireless/b43/pio.c b/drivers/net/wireless/b43/pio.c index 5e87650b07f..9b904440021 100644 --- a/drivers/net/wireless/b43/pio.c +++ b/drivers/net/wireless/b43/pio.c @@ -332,6 +332,7 @@ static u16 tx_write_2byte_queue(struct b43_pio_txqueue *q, unsigned int data_len) { struct b43_wldev *dev = q->dev; + struct b43_wl *wl = dev->wl; const u8 *data = _data; ctl |= B43_PIO_TXCTL_WRITELO | B43_PIO_TXCTL_WRITEHI; @@ -341,13 +342,12 @@ static u16 tx_write_2byte_queue(struct b43_pio_txqueue *q, q->mmio_base + B43_PIO_TXDATA, sizeof(u16)); if (data_len & 1) { - u8 tail[2] = { 0, }; - /* Write the last byte. */ ctl &= ~B43_PIO_TXCTL_WRITEHI; b43_piotx_write16(q, B43_PIO_TXCTL, ctl); - tail[0] = data[data_len - 1]; - ssb_block_write(dev->dev, tail, 2, + wl->tx_tail[0] = data[data_len - 1]; + wl->tx_tail[1] = 0; + ssb_block_write(dev->dev, wl->tx_tail, 2, q->mmio_base + B43_PIO_TXDATA, sizeof(u16)); } @@ -382,6 +382,7 @@ static u32 tx_write_4byte_queue(struct b43_pio_txqueue *q, unsigned int data_len) { struct b43_wldev *dev = q->dev; + struct b43_wl *wl = dev->wl; const u8 *data = _data; ctl |= B43_PIO8_TXCTL_0_7 | B43_PIO8_TXCTL_8_15 | @@ -392,29 +393,31 @@ static u32 tx_write_4byte_queue(struct b43_pio_txqueue *q, q->mmio_base + B43_PIO8_TXDATA, sizeof(u32)); if (data_len & 3) { - u8 tail[4] = { 0, }; - + wl->tx_tail[3] = 0; /* Write the last few bytes. */ ctl &= ~(B43_PIO8_TXCTL_8_15 | B43_PIO8_TXCTL_16_23 | B43_PIO8_TXCTL_24_31); switch (data_len & 3) { case 3: ctl |= B43_PIO8_TXCTL_16_23 | B43_PIO8_TXCTL_8_15; - tail[0] = data[data_len - 3]; - tail[1] = data[data_len - 2]; - tail[2] = data[data_len - 1]; + wl->tx_tail[0] = data[data_len - 3]; + wl->tx_tail[1] = data[data_len - 2]; + wl->tx_tail[2] = data[data_len - 1]; break; case 2: ctl |= B43_PIO8_TXCTL_8_15; - tail[0] = data[data_len - 2]; - tail[1] = data[data_len - 1]; + wl->tx_tail[0] = data[data_len - 2]; + wl->tx_tail[1] = data[data_len - 1]; + wl->tx_tail[2] = 0; break; case 1: - tail[0] = data[data_len - 1]; + wl->tx_tail[0] = data[data_len - 1]; + wl->tx_tail[1] = 0; + wl->tx_tail[2] = 0; break; } b43_piotx_write32(q, B43_PIO8_TXCTL, ctl); - ssb_block_write(dev->dev, tail, 4, + ssb_block_write(dev->dev, wl->tx_tail, 4, q->mmio_base + B43_PIO8_TXDATA, sizeof(u32)); } @@ -446,8 +449,9 @@ static void pio_tx_frame_4byte_queue(struct b43_pio_txpacket *pack, static int pio_tx_frame(struct b43_pio_txqueue *q, struct sk_buff *skb) { + struct b43_wldev *dev = q->dev; + struct b43_wl *wl = dev->wl; struct b43_pio_txpacket *pack; - struct b43_txhdr txhdr; u16 cookie; int err; unsigned int hdrlen; @@ -458,8 +462,8 @@ static int pio_tx_frame(struct b43_pio_txqueue *q, struct b43_pio_txpacket, list); cookie = generate_cookie(q, pack); - hdrlen = b43_txhdr_size(q->dev); - err = b43_generate_txhdr(q->dev, (u8 *)&txhdr, skb, + hdrlen = b43_txhdr_size(dev); + err = b43_generate_txhdr(dev, (u8 *)&wl->txhdr, skb, info, cookie); if (err) return err; @@ -467,15 +471,15 @@ static int pio_tx_frame(struct b43_pio_txqueue *q, if (info->flags & IEEE80211_TX_CTL_SEND_AFTER_DTIM) { /* Tell the firmware about the cookie of the last * mcast frame, so it can clear the more-data bit in it. 
*/ - b43_shm_write16(q->dev, B43_SHM_SHARED, + b43_shm_write16(dev, B43_SHM_SHARED, B43_SHM_SH_MCASTCOOKIE, cookie); } pack->skb = skb; if (q->rev >= 8) - pio_tx_frame_4byte_queue(pack, (const u8 *)&txhdr, hdrlen); + pio_tx_frame_4byte_queue(pack, (const u8 *)&wl->txhdr, hdrlen); else - pio_tx_frame_2byte_queue(pack, (const u8 *)&txhdr, hdrlen); + pio_tx_frame_2byte_queue(pack, (const u8 *)&wl->txhdr, hdrlen); /* Remove it from the list of available packet slots. * It will be put back when we receive the status report. */ @@ -615,14 +619,14 @@ void b43_pio_get_tx_stats(struct b43_wldev *dev, static bool pio_rx_frame(struct b43_pio_rxqueue *q) { struct b43_wldev *dev = q->dev; - struct b43_rxhdr_fw4 rxhdr; + struct b43_wl *wl = dev->wl; u16 len; u32 macstat; unsigned int i, padding; struct sk_buff *skb; const char *err_msg = NULL; - memset(&rxhdr, 0, sizeof(rxhdr)); + memset(&wl->rxhdr, 0, sizeof(wl->rxhdr)); /* Check if we have data and wait for it to get ready. */ if (q->rev >= 8) { @@ -660,16 +664,16 @@ data_ready: /* Get the preamble (RX header) */ if (q->rev >= 8) { - ssb_block_read(dev->dev, &rxhdr, sizeof(rxhdr), + ssb_block_read(dev->dev, &wl->rxhdr, sizeof(wl->rxhdr), q->mmio_base + B43_PIO8_RXDATA, sizeof(u32)); } else { - ssb_block_read(dev->dev, &rxhdr, sizeof(rxhdr), + ssb_block_read(dev->dev, &wl->rxhdr, sizeof(wl->rxhdr), q->mmio_base + B43_PIO_RXDATA, sizeof(u16)); } /* Sanity checks. */ - len = le16_to_cpu(rxhdr.frame_len); + len = le16_to_cpu(wl->rxhdr.frame_len); if (unlikely(len > 0x700)) { err_msg = "len > 0x700"; goto rx_error; @@ -679,7 +683,7 @@ data_ready: goto rx_error; } - macstat = le32_to_cpu(rxhdr.mac_status); + macstat = le32_to_cpu(wl->rxhdr.mac_status); if (macstat & B43_RX_MAC_FCSERR) { if (!(q->dev->wl->filter_flags & FIF_FCSFAIL)) { /* Drop frames with failed FCS. */ @@ -704,24 +708,22 @@ data_ready: q->mmio_base + B43_PIO8_RXDATA, sizeof(u32)); if (len & 3) { - u8 tail[4] = { 0, }; - /* Read the last few bytes. */ - ssb_block_read(dev->dev, tail, 4, + ssb_block_read(dev->dev, wl->rx_tail, 4, q->mmio_base + B43_PIO8_RXDATA, sizeof(u32)); switch (len & 3) { case 3: - skb->data[len + padding - 3] = tail[0]; - skb->data[len + padding - 2] = tail[1]; - skb->data[len + padding - 1] = tail[2]; + skb->data[len + padding - 3] = wl->rx_tail[0]; + skb->data[len + padding - 2] = wl->rx_tail[1]; + skb->data[len + padding - 1] = wl->rx_tail[2]; break; case 2: - skb->data[len + padding - 2] = tail[0]; - skb->data[len + padding - 1] = tail[1]; + skb->data[len + padding - 2] = wl->rx_tail[0]; + skb->data[len + padding - 1] = wl->rx_tail[1]; break; case 1: - skb->data[len + padding - 1] = tail[0]; + skb->data[len + padding - 1] = wl->rx_tail[0]; break; } } @@ -730,17 +732,15 @@ data_ready: q->mmio_base + B43_PIO_RXDATA, sizeof(u16)); if (len & 1) { - u8 tail[2] = { 0, }; - /* Read the last byte. 
*/ - ssb_block_read(dev->dev, tail, 2, + ssb_block_read(dev->dev, wl->rx_tail, 2, q->mmio_base + B43_PIO_RXDATA, sizeof(u16)); - skb->data[len + padding - 1] = tail[0]; + skb->data[len + padding - 1] = wl->rx_tail[0]; } } - b43_rx(q->dev, skb, &rxhdr); + b43_rx(q->dev, skb, &wl->rxhdr); return 1; diff --git a/drivers/net/wireless/b43/xmit.c b/drivers/net/wireless/b43/xmit.c index ac9f600995e..f4e9695ec18 100644 --- a/drivers/net/wireless/b43/xmit.c +++ b/drivers/net/wireless/b43/xmit.c @@ -27,7 +27,7 @@ */ -#include "xmit.h" +#include "b43.h" #include "phy_common.h" #include "dma.h" #include "pio.h" @@ -690,7 +690,10 @@ void b43_rx(struct b43_wldev *dev, struct sk_buff *skb, const void *_rxhdr) } memcpy(IEEE80211_SKB_RXCB(skb), &status, sizeof(status)); + + local_bh_disable(); ieee80211_rx(dev->wl->hw, skb); + local_bh_enable(); #if B43_DEBUG dev->rx_count++; diff --git a/drivers/net/wireless/iwlwifi/iwl-3945-rs.c b/drivers/net/wireless/iwlwifi/iwl-3945-rs.c index a16bd4147ea..cbb0585083a 100644 --- a/drivers/net/wireless/iwlwifi/iwl-3945-rs.c +++ b/drivers/net/wireless/iwlwifi/iwl-3945-rs.c @@ -702,7 +702,7 @@ static void rs_get_rate(void *priv_r, struct ieee80211_sta *sta, u8 sta_id = iwl_find_station(priv, hdr->addr1); if (sta_id == IWL_INVALID_STATION) { - IWL_DEBUG_RATE(priv, "LQ: ADD station %pm\n", + IWL_DEBUG_RATE(priv, "LQ: ADD station %pM\n", hdr->addr1); sta_id = iwl_add_station(priv, hdr->addr1, false, CMD_ASYNC, NULL); diff --git a/drivers/net/wireless/iwlwifi/iwl-3945.c b/drivers/net/wireless/iwlwifi/iwl-3945.c index 68136172b82..f059b49dc69 100644 --- a/drivers/net/wireless/iwlwifi/iwl-3945.c +++ b/drivers/net/wireless/iwlwifi/iwl-3945.c @@ -611,7 +611,7 @@ static void iwl3945_rx_reply_rx(struct iwl_priv *priv, if (rx_status.band == IEEE80211_BAND_5GHZ) rx_status.rate_idx -= IWL_FIRST_OFDM_RATE; - rx_status.antenna = le16_to_cpu(rx_hdr->phy_flags & + rx_status.antenna = (le16_to_cpu(rx_hdr->phy_flags) & RX_RES_PHY_FLAGS_ANTENNA_MSK) >> 4; /* set the preamble flag if appropriate */ diff --git a/drivers/net/wireless/iwlwifi/iwl-5000.c b/drivers/net/wireless/iwlwifi/iwl-5000.c index d6bc0e05104..6e6f516ba40 100644 --- a/drivers/net/wireless/iwlwifi/iwl-5000.c +++ b/drivers/net/wireless/iwlwifi/iwl-5000.c @@ -318,7 +318,7 @@ static void iwl5000_gain_computation(struct iwl_priv *priv, (s32)average_noise[i])) / 1500; /* bound gain by 2 bits value max, 3rd bit is sign */ data->delta_gain_code[i] = - min(abs(delta_g), CHAIN_NOISE_MAX_DELTA_GAIN_CODE); + min(abs(delta_g), (long) CHAIN_NOISE_MAX_DELTA_GAIN_CODE); if (delta_g < 0) /* set negative sign */ diff --git a/drivers/net/wireless/iwlwifi/iwl-agn.c b/drivers/net/wireless/iwlwifi/iwl-agn.c index 313d3e5ee84..eaafae091f5 100644 --- a/drivers/net/wireless/iwlwifi/iwl-agn.c +++ b/drivers/net/wireless/iwlwifi/iwl-agn.c @@ -3106,8 +3106,8 @@ static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent) out_pci_disable_device: pci_disable_device(pdev); out_ieee80211_free_hw: - ieee80211_free_hw(priv->hw); iwl_free_traffic_mem(priv); + ieee80211_free_hw(priv->hw); out: return err; } diff --git a/drivers/net/wireless/iwlwifi/iwl-commands.h b/drivers/net/wireless/iwlwifi/iwl-commands.h index 2c5c88fc38f..4afaf773aea 100644 --- a/drivers/net/wireless/iwlwifi/iwl-commands.h +++ b/drivers/net/wireless/iwlwifi/iwl-commands.h @@ -1154,7 +1154,7 @@ struct iwl_wep_cmd { #define RX_RES_PHY_FLAGS_MOD_CCK_MSK cpu_to_le16(1 << 1) #define RX_RES_PHY_FLAGS_SHORT_PREAMBLE_MSK cpu_to_le16(1 << 2) #define 
RX_RES_PHY_FLAGS_NARROW_BAND_MSK cpu_to_le16(1 << 3) -#define RX_RES_PHY_FLAGS_ANTENNA_MSK cpu_to_le16(0xf0) +#define RX_RES_PHY_FLAGS_ANTENNA_MSK 0xf0 #define RX_RES_PHY_FLAGS_ANTENNA_POS 4 #define RX_RES_STATUS_SEC_TYPE_MSK (0x7 << 8) diff --git a/drivers/net/wireless/iwlwifi/iwl-eeprom.c b/drivers/net/wireless/iwlwifi/iwl-eeprom.c index 3d2b93a61e6..e14c9952a93 100644 --- a/drivers/net/wireless/iwlwifi/iwl-eeprom.c +++ b/drivers/net/wireless/iwlwifi/iwl-eeprom.c @@ -410,7 +410,6 @@ static int iwl_find_otp_image(struct iwl_priv *priv, u16 *validblockaddr) { u16 next_link_addr = 0, link_value = 0, valid_addr; - int ret = 0; int usedblocks = 0; /* set addressing mode to absolute to traverse the link list */ @@ -430,29 +429,29 @@ static int iwl_find_otp_image(struct iwl_priv *priv, * check for more block on the link list */ valid_addr = next_link_addr; - next_link_addr = link_value; + next_link_addr = link_value * sizeof(u16); IWL_DEBUG_INFO(priv, "OTP blocks %d addr 0x%x\n", usedblocks, next_link_addr); if (iwl_read_otp_word(priv, next_link_addr, &link_value)) return -EINVAL; if (!link_value) { /* - * reach the end of link list, + * reach the end of link list, return success and * set address point to the starting address * of the image */ - goto done; + *validblockaddr = valid_addr; + /* skip first 2 bytes (link list pointer) */ + *validblockaddr += 2; + return 0; } /* more in the link list, continue */ usedblocks++; - } while (usedblocks < priv->cfg->max_ll_items); - /* OTP full, use last block */ - IWL_DEBUG_INFO(priv, "OTP is full, use last block\n"); -done: - *validblockaddr = valid_addr; - /* skip first 2 bytes (link list pointer) */ - *validblockaddr += 2; - return ret; + } while (usedblocks <= priv->cfg->max_ll_items); + + /* OTP has no valid blocks */ + IWL_DEBUG_INFO(priv, "OTP has no valid blocks\n"); + return -EINVAL; } /** diff --git a/drivers/net/wireless/iwlwifi/iwl-eeprom.h b/drivers/net/wireless/iwlwifi/iwl-eeprom.h index 6b68db7b1b8..80b9e45d9b9 100644 --- a/drivers/net/wireless/iwlwifi/iwl-eeprom.h +++ b/drivers/net/wireless/iwlwifi/iwl-eeprom.h @@ -220,35 +220,35 @@ struct iwl_eeprom_enhanced_txpwr { * Section 10: 2.4 GHz 40MHz channels: 132, 44 (_above_) */ /* 2.4 GHz band: CCK */ -#define EEPROM_LB_CCK_20_COMMON ((0xAA)\ +#define EEPROM_LB_CCK_20_COMMON ((0xA8)\ | INDIRECT_ADDRESS | INDIRECT_REGULATORY) /* 8 bytes */ /* 2.4 GHz band: 20MHz-Legacy, 20MHz-HT, 40MHz-HT */ -#define EEPROM_LB_OFDM_COMMON ((0xB2)\ +#define EEPROM_LB_OFDM_COMMON ((0xB0)\ | INDIRECT_ADDRESS | INDIRECT_REGULATORY) /* 24 bytes */ /* 5.2 GHz band: 20MHz-Legacy, 20MHz-HT, 40MHz-HT */ -#define EEPROM_HB_OFDM_COMMON ((0xCA)\ +#define EEPROM_HB_OFDM_COMMON ((0xC8)\ | INDIRECT_ADDRESS | INDIRECT_REGULATORY) /* 24 bytes */ /* 2.4GHz band channels: * 1Legacy, 1HT, 2Legacy, 2HT, 10Legacy, 10HT, 11Legacy, 11HT */ -#define EEPROM_LB_OFDM_20_BAND ((0xE2)\ +#define EEPROM_LB_OFDM_20_BAND ((0xE0)\ | INDIRECT_ADDRESS | INDIRECT_REGULATORY) /* 64 bytes */ /* 2.4 GHz band HT40 channels: (1,+1) (2,+1) (6,+1) (7,+1) (9,+1) */ -#define EEPROM_LB_OFDM_HT40_BAND ((0x122)\ +#define EEPROM_LB_OFDM_HT40_BAND ((0x120)\ | INDIRECT_ADDRESS | INDIRECT_REGULATORY) /* 40 bytes */ /* 5.2GHz band channels: 36Legacy, 36HT, 64Legacy, 64HT, 100Legacy, 100HT */ -#define EEPROM_HB_OFDM_20_BAND ((0x14A)\ +#define EEPROM_HB_OFDM_20_BAND ((0x148)\ | INDIRECT_ADDRESS | INDIRECT_REGULATORY) /* 48 bytes */ /* 5.2 GHz band HT40 channels: (36,+1) (60,+1) (100,+1) */ -#define EEPROM_HB_OFDM_HT40_BAND ((0x17A)\ +#define 
EEPROM_HB_OFDM_HT40_BAND ((0x178)\ | INDIRECT_ADDRESS | INDIRECT_REGULATORY) /* 24 bytes */ /* 2.4 GHz band, channel 13: Legacy, HT */ -#define EEPROM_LB_OFDM_20_CHANNEL_13 ((0x192)\ +#define EEPROM_LB_OFDM_20_CHANNEL_13 ((0x190)\ | INDIRECT_ADDRESS | INDIRECT_REGULATORY) /* 16 bytes */ /* 5.2 GHz band, channel 140: Legacy, HT */ -#define EEPROM_HB_OFDM_20_CHANNEL_140 ((0x1A2)\ +#define EEPROM_HB_OFDM_20_CHANNEL_140 ((0x1A0)\ | INDIRECT_ADDRESS | INDIRECT_REGULATORY) /* 16 bytes */ /* 5.2 GHz band, HT40 channels (132,+1) (44,+1) */ -#define EEPROM_HB_OFDM_HT40_BAND_1 ((0x1B2)\ +#define EEPROM_HB_OFDM_HT40_BAND_1 ((0x1B0)\ | INDIRECT_ADDRESS | INDIRECT_REGULATORY) /* 16 bytes */ diff --git a/drivers/net/wireless/iwlwifi/iwl-rx.c b/drivers/net/wireless/iwlwifi/iwl-rx.c index 8e1bb53c0aa..493626bcd3e 100644 --- a/drivers/net/wireless/iwlwifi/iwl-rx.c +++ b/drivers/net/wireless/iwlwifi/iwl-rx.c @@ -1044,7 +1044,7 @@ void iwl_rx_reply_rx(struct iwl_priv *priv, * as a bitmask. */ rx_status.antenna = - le16_to_cpu(phy_res->phy_flags & RX_RES_PHY_FLAGS_ANTENNA_MSK) + (le16_to_cpu(phy_res->phy_flags) & RX_RES_PHY_FLAGS_ANTENNA_MSK) >> RX_RES_PHY_FLAGS_ANTENNA_POS; /* set the preamble flag if appropriate */ diff --git a/drivers/net/wireless/iwlwifi/iwl3945-base.c b/drivers/net/wireless/iwlwifi/iwl3945-base.c index aa49230422f..d00a8033409 100644 --- a/drivers/net/wireless/iwlwifi/iwl3945-base.c +++ b/drivers/net/wireless/iwlwifi/iwl3945-base.c @@ -4097,8 +4097,8 @@ static int iwl3945_pci_probe(struct pci_dev *pdev, const struct pci_device_id *e pci_set_drvdata(pdev, NULL); pci_disable_device(pdev); out_ieee80211_free_hw: - ieee80211_free_hw(priv->hw); iwl_free_traffic_mem(priv); + ieee80211_free_hw(priv->hw); out: return err; } diff --git a/drivers/net/wireless/libertas/cmdresp.c b/drivers/net/wireless/libertas/cmdresp.c index c42d3faa266..23f684337fd 100644 --- a/drivers/net/wireless/libertas/cmdresp.c +++ b/drivers/net/wireless/libertas/cmdresp.c @@ -3,6 +3,7 @@ * responses as well as events generated by firmware.
diff --git a/drivers/net/wireless/iwlwifi/iwl3945-base.c b/drivers/net/wireless/iwlwifi/iwl3945-base.c
index aa49230422f..d00a8033409 100644
--- a/drivers/net/wireless/iwlwifi/iwl3945-base.c
+++ b/drivers/net/wireless/iwlwifi/iwl3945-base.c
@@ -4097,8 +4097,8 @@ static int iwl3945_pci_probe(struct pci_dev *pdev, const struct pci_device_id *e
 	pci_set_drvdata(pdev, NULL);
 	pci_disable_device(pdev);
  out_ieee80211_free_hw:
-	ieee80211_free_hw(priv->hw);
 	iwl_free_traffic_mem(priv);
+	ieee80211_free_hw(priv->hw);
 out:
 	return err;
 }
diff --git a/drivers/net/wireless/libertas/cmdresp.c b/drivers/net/wireless/libertas/cmdresp.c
index c42d3faa266..23f684337fd 100644
--- a/drivers/net/wireless/libertas/cmdresp.c
+++ b/drivers/net/wireless/libertas/cmdresp.c
@@ -3,6 +3,7 @@
  * responses as well as events generated by firmware.
  */
 #include <linux/delay.h>
+#include <linux/sched.h>
 #include <linux/if_arp.h>
 #include <linux/netdevice.h>
 #include <asm/unaligned.h>
diff --git a/drivers/net/znet.c b/drivers/net/znet.c
index a0384b6f09b..b4234733375 100644
--- a/drivers/net/znet.c
+++ b/drivers/net/znet.c
@@ -169,7 +169,6 @@ static void znet_tx_timeout (struct net_device *dev);
 static int znet_request_resources (struct net_device *dev)
 {
 	struct znet_private *znet = netdev_priv(dev);
-	unsigned long flags;
 
 	if (request_irq (dev->irq, &znet_interrupt, 0, "ZNet", dev))
 		goto failed;
@@ -187,13 +186,9 @@ static int znet_request_resources (struct net_device *dev)
  free_sia:
 	release_region (znet->sia_base, znet->sia_size);
  free_tx_dma:
-	flags = claim_dma_lock();
 	free_dma (znet->tx_dma);
-	release_dma_lock (flags);
  free_rx_dma:
-	flags = claim_dma_lock();
 	free_dma (znet->rx_dma);
-	release_dma_lock (flags);
 free_irq:
 	free_irq (dev->irq, dev);
 failed:
@@ -203,14 +198,11 @@ static int znet_request_resources (struct net_device *dev)
 static void znet_release_resources (struct net_device *dev)
 {
 	struct znet_private *znet = netdev_priv(dev);
-	unsigned long flags;
 
 	release_region (znet->sia_base, znet->sia_size);
 	release_region (dev->base_addr, znet->io_size);
-	flags = claim_dma_lock();
 	free_dma (znet->tx_dma);
 	free_dma (znet->rx_dma);
-	release_dma_lock (flags);
 	free_irq (dev->irq, dev);
 }
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index 94958c10976..812a5f3c2ab 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -557,7 +557,7 @@ struct netdev_queue {
  *	Callback uses when the transmitter has not made any progress
  *	for dev->watchdog ticks.
  *
- * struct net_device_stats* (*get_stats)(struct net_device *dev);
+ * struct net_device_stats* (*ndo_get_stats)(struct net_device *dev);
  *	Called when a user wants to get the network device usage
  *	statistics. If not defined, the counters in dev->stats will
  *	be used.
diff --git a/include/net/mac80211.h b/include/net/mac80211.h
index 466859b285e..c75b960c8ac 100644
--- a/include/net/mac80211.h
+++ b/include/net/mac80211.h
@@ -1669,6 +1669,8 @@ void ieee80211_restart_hw(struct ieee80211_hw *hw);
  * to this function and ieee80211_rx_irqsafe() may not be mixed for a
  * single hardware.
  *
+ * Note that right now, this function must be called with softirqs disabled.
+ *
  * @hw: the hardware this frame came in on
  * @skb: the buffer to receive, owned by mac80211 after this call
  */
diff --git a/include/net/sock.h b/include/net/sock.h
index 1621935aad5..9f96394f694 100644
--- a/include/net/sock.h
+++ b/include/net/sock.h
@@ -226,12 +226,12 @@ struct sock {
 #define sk_prot			__sk_common.skc_prot
 #define sk_net			__sk_common.skc_net
 	kmemcheck_bitfield_begin(flags);
-	unsigned char		sk_shutdown : 2,
-				sk_no_check : 2,
-				sk_userlocks : 4;
+	unsigned int		sk_shutdown : 2,
+				sk_no_check : 2,
+				sk_userlocks : 4,
+				sk_protocol : 8,
+				sk_type : 16;
 	kmemcheck_bitfield_end(flags);
-	unsigned char		sk_protocol;
-	unsigned short		sk_type;
 	int			sk_rcvbuf;
 	socket_lock_t		sk_lock;
 	/*
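
The include/net/sock.h hunk packs sk_protocol and sk_type into the same bitfield word as the existing flag fields, so all five fields sit inside the kmemcheck-annotated region (2 + 2 + 4 + 8 + 16 = 32 bits) instead of one annotated byte plus two separately tracked members. A compilable sketch of just the layout change (field names shortened, kmemcheck macros omitted):

    #include <stdio.h>

    /* Before: an 8-bit bitfield plus two standalone members. */
    struct sock_before {
        unsigned char  shutdown : 2, no_check : 2, userlocks : 4;
        unsigned char  protocol;
        unsigned short type;
    };

    /* After: all five fields packed into one 32-bit bitfield word. */
    struct sock_after {
        unsigned int   shutdown : 2, no_check : 2, userlocks : 4,
                       protocol : 8, type : 16;
    };

    int main(void)
    {
        /* Both typically occupy 4 bytes; the point of the kernel change
         * is that protocol and type now live inside the annotated flags
         * word rather than as separately tracked bytes. */
        printf("before=%zu after=%zu\n",
               sizeof(struct sock_before), sizeof(struct sock_after));
        return 0;
    }
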
diff --git a/net/ipv4/tcp_minisocks.c b/net/ipv4/tcp_minisocks.c
index 624c3c9b3c2..e320afea07f 100644
--- a/net/ipv4/tcp_minisocks.c
+++ b/net/ipv4/tcp_minisocks.c
@@ -644,6 +644,7 @@ struct sock *tcp_check_req(struct sock *sk, struct sk_buff *skb,
 	/* If TCP_DEFER_ACCEPT is set, drop bare ACK.
 	 */
 	if (inet_csk(sk)->icsk_accept_queue.rskq_defer_accept &&
 	    TCP_SKB_CB(skb)->end_seq == tcp_rsk(req)->rcv_isn + 1) {
+		inet_csk(sk)->icsk_accept_queue.rskq_defer_accept--;
 		inet_rsk(req)->acked = 1;
 		return NULL;
 	}
diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
index 6ec6a8a4a22..d0d436d6216 100644
--- a/net/ipv4/udp.c
+++ b/net/ipv4/udp.c
@@ -841,6 +841,42 @@ out:
 	return ret;
 }
 
+/**
+ *	first_packet_length	- return length of first packet in receive queue
+ *	@sk: socket
+ *
+ *	Drops all bad checksum frames, until a valid one is found.
+ *	Returns the length of found skb, or 0 if none is found.
+ */
+static unsigned int first_packet_length(struct sock *sk)
+{
+	struct sk_buff_head list_kill, *rcvq = &sk->sk_receive_queue;
+	struct sk_buff *skb;
+	unsigned int res;
+
+	__skb_queue_head_init(&list_kill);
+
+	spin_lock_bh(&rcvq->lock);
+	while ((skb = skb_peek(rcvq)) != NULL &&
+		udp_lib_checksum_complete(skb)) {
+		UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS,
+				 IS_UDPLITE(sk));
+		__skb_unlink(skb, rcvq);
+		__skb_queue_tail(&list_kill, skb);
+	}
+	res = skb ? skb->len : 0;
+	spin_unlock_bh(&rcvq->lock);
+
+	if (!skb_queue_empty(&list_kill)) {
+		lock_sock(sk);
+		__skb_queue_purge(&list_kill);
+		sk_mem_reclaim_partial(sk);
+		release_sock(sk);
+	}
+	return res;
+}
+
 /*
  *	IOCTL requests applicable to the UDP protocol
  */
@@ -857,21 +893,16 @@ int udp_ioctl(struct sock *sk, int cmd, unsigned long arg)
 
 	case SIOCINQ:
 	{
-		struct sk_buff *skb;
-		unsigned long amount;
+		unsigned int amount = first_packet_length(sk);
 
-		amount = 0;
-		spin_lock_bh(&sk->sk_receive_queue.lock);
-		skb = skb_peek(&sk->sk_receive_queue);
-		if (skb != NULL) {
+		if (amount)
 			/*
 			 * We will only return the amount
 			 * of this packet since that is all
 			 * that will be read.
 			 */
-			amount = skb->len - sizeof(struct udphdr);
-		}
-		spin_unlock_bh(&sk->sk_receive_queue.lock);
+			amount -= sizeof(struct udphdr);
+
 		return put_user(amount, (int __user *)arg);
 	}
 
@@ -1540,29 +1571,11 @@ unsigned int udp_poll(struct file *file, struct socket *sock, poll_table *wait)
 {
 	unsigned int mask = datagram_poll(file, sock, wait);
 	struct sock *sk = sock->sk;
-	int 	is_lite = IS_UDPLITE(sk);
 
 	/* Check for false positives due to checksum errors */
-	if ((mask & POLLRDNORM) &&
-	    !(file->f_flags & O_NONBLOCK) &&
-	    !(sk->sk_shutdown & RCV_SHUTDOWN)) {
-		struct sk_buff_head *rcvq = &sk->sk_receive_queue;
-		struct sk_buff *skb;
-
-		spin_lock_bh(&rcvq->lock);
-		while ((skb = skb_peek(rcvq)) != NULL &&
-		       udp_lib_checksum_complete(skb)) {
-			UDP_INC_STATS_BH(sock_net(sk),
-					UDP_MIB_INERRORS, is_lite);
-			__skb_unlink(skb, rcvq);
-			kfree_skb(skb);
-		}
-		spin_unlock_bh(&rcvq->lock);
-
-		/* nothing to see, move along */
-		if (skb == NULL)
-			mask &= ~(POLLIN | POLLRDNORM);
-	}
+	if ((mask & POLLRDNORM) && !(file->f_flags & O_NONBLOCK) &&
+	    !(sk->sk_shutdown & RCV_SHUTDOWN) && !first_packet_length(sk))
+		mask &= ~(POLLIN | POLLRDNORM);
 
 	return mask;
 
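
The net/ipv4/udp.c change consolidates the "skip datagrams with bad checksums" logic into first_packet_length(), which udp_poll() and the SIOCINQ ioctl now share, so poll() can no longer report a socket readable when only corrupt datagrams are queued. From userspace the interface is unchanged; a minimal example of querying the next datagram's payload size with SIOCINQ (error handling trimmed for brevity):

    #include <stdio.h>
    #include <string.h>
    #include <sys/ioctl.h>
    #include <sys/socket.h>
    #include <netinet/in.h>
    #include <linux/sockios.h>   /* SIOCINQ */
    #include <unistd.h>

    int main(void)
    {
        int fd = socket(AF_INET, SOCK_DGRAM, 0);
        struct sockaddr_in addr;
        int pending = 0;

        memset(&addr, 0, sizeof(addr));
        addr.sin_family = AF_INET;
        addr.sin_addr.s_addr = htonl(INADDR_LOOPBACK);
        addr.sin_port = 0;                    /* any free port */
        bind(fd, (struct sockaddr *)&addr, sizeof(addr));

        /* SIOCINQ reports the payload length of the next UDP datagram
         * (0 here, since nothing has been sent to this socket). */
        if (ioctl(fd, SIOCINQ, &pending) == 0)
            printf("next datagram payload: %d bytes\n", pending);

        close(fd);
        return 0;
    }
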
diff --git a/net/mac80211/ibss.c b/net/mac80211/ibss.c
index 920ec8792f4..6eaf6982343 100644
--- a/net/mac80211/ibss.c
+++ b/net/mac80211/ibss.c
@@ -544,7 +544,7 @@ static void ieee80211_sta_find_ibss(struct ieee80211_sub_if_data *sdata)
 			       "%pM\n", bss->cbss.bssid, ifibss->bssid);
 #endif /* CONFIG_MAC80211_IBSS_DEBUG */
 
-	if (bss && memcmp(ifibss->bssid, bss->cbss.bssid, ETH_ALEN)) {
+	if (bss && !memcmp(ifibss->bssid, bss->cbss.bssid, ETH_ALEN)) {
 		printk(KERN_DEBUG "%s: Selected IBSS BSSID %pM"
 		       " based on configured SSID\n",
 		       sdata->dev->name, bss->cbss.bssid);
@@ -829,7 +829,7 @@ void ieee80211_ibss_notify_scan_completed(struct ieee80211_local *local)
 		if (!sdata->u.ibss.ssid_len)
 			continue;
 		sdata->u.ibss.last_scan_completed = jiffies;
-		ieee80211_sta_find_ibss(sdata);
+		mod_timer(&sdata->u.ibss.timer, 0);
 	}
 	mutex_unlock(&local->iflist_mtx);
 }
diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c
index c01588f9d45..7170bf4565a 100644
--- a/net/mac80211/rx.c
+++ b/net/mac80211/rx.c
@@ -2164,11 +2164,17 @@ static void __ieee80211_rx_handle_packet(struct ieee80211_hw *hw,
 
 	skb = rx.skb;
 
-	list_for_each_entry_rcu(sdata, &local->interfaces, list) {
+	if (rx.sdata && ieee80211_is_data(hdr->frame_control)) {
+		rx.flags |= IEEE80211_RX_RA_MATCH;
+		prepares = prepare_for_handlers(rx.sdata, &rx, hdr);
+		if (prepares)
+			prev = rx.sdata;
+	} else list_for_each_entry_rcu(sdata, &local->interfaces, list) {
 		if (!netif_running(sdata->dev))
 			continue;
 
-		if (sdata->vif.type == NL80211_IFTYPE_MONITOR)
+		if (sdata->vif.type == NL80211_IFTYPE_MONITOR ||
+		    sdata->vif.type == NL80211_IFTYPE_AP_VLAN)
 			continue;
 
 		rx.flags |= IEEE80211_RX_RA_MATCH;
@@ -2447,6 +2453,8 @@ void ieee80211_rx(struct ieee80211_hw *hw, struct sk_buff *skb)
 	struct ieee80211_supported_band *sband;
 	struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
 
+	WARN_ON_ONCE(softirq_count() == 0);
+
 	if (WARN_ON(status->band < 0 ||
 		    status->band >= IEEE80211_NUM_BANDS))
 		goto drop;
diff --git a/net/mac80211/sta_info.c b/net/mac80211/sta_info.c
index eec001491e6..594f2318c3d 100644
--- a/net/mac80211/sta_info.c
+++ b/net/mac80211/sta_info.c
@@ -361,6 +361,7 @@ int sta_info_insert(struct sta_info *sta)
 				     u.ap);
 
 		drv_sta_notify(local, &sdata->vif, STA_NOTIFY_ADD, &sta->sta);
+		sdata = sta->sdata;
 	}
 
 #ifdef CONFIG_MAC80211_VERBOSE_DEBUG
@@ -496,6 +497,7 @@ static void __sta_info_unlink(struct sta_info **sta)
 
 		drv_sta_notify(local, &sdata->vif, STA_NOTIFY_REMOVE,
 			       &(*sta)->sta);
+		sdata = (*sta)->sdata;
 	}
 
 	if (ieee80211_vif_is_mesh(&sdata->vif)) {
diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c
index fd402829661..db4bda681ec 100644
--- a/net/mac80211/tx.c
+++ b/net/mac80211/tx.c
@@ -1704,7 +1704,8 @@ netdev_tx_t ieee80211_subif_start_xmit(struct sk_buff *skb,
 	if (!is_multicast_ether_addr(hdr.addr1)) {
 		rcu_read_lock();
 		sta = sta_info_get(local, hdr.addr1);
-		if (sta)
+		/* XXX: in the future, use sdata to look up the sta */
+		if (sta && sta->sdata == sdata)
 			sta_flags = get_sta_flags(sta);
 		rcu_read_unlock();
 	}
diff --git a/net/mac80211/util.c b/net/mac80211/util.c
index dd656432136..aeb65b3d229 100644
--- a/net/mac80211/util.c
+++ b/net/mac80211/util.c
@@ -339,7 +339,7 @@ void ieee80211_add_pending_skb(struct ieee80211_local *local,
 	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
 
 	if (WARN_ON(!info->control.vif)) {
-		kfree(skb);
+		kfree_skb(skb);
 		return;
 	}
 
@@ -367,7 +367,7 @@ int ieee80211_add_pending_skbs(struct ieee80211_local *local,
 		struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
 
 		if (WARN_ON(!info->control.vif)) {
-			kfree(skb);
+			kfree_skb(skb);
 			continue;
 		}
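
The net/mac80211/util.c hunks replace kfree() with kfree_skb(): an sk_buff owns a separately allocated data buffer (plus fragments and accounting state), so freeing only the struct leaks everything it points to. A toy analogue of the underlying rule that a type with owned resources must go through its own destructor (struct buf and its helpers are illustrative, not kernel API):

    #include <stdlib.h>

    /* Toy analogue of struct sk_buff: the struct owns a separately
     * allocated data buffer, so freeing the struct alone leaks it. */
    struct buf {
        unsigned char *data;
        size_t len;
    };

    static struct buf *buf_alloc(size_t len)
    {
        struct buf *b = malloc(sizeof(*b));
        if (!b)
            return NULL;
        b->data = malloc(len);
        if (!b->data) {
            free(b);
            return NULL;
        }
        b->len = len;
        return b;
    }

    /* The analogue of kfree_skb(): releases everything the object owns. */
    static void buf_free(struct buf *b)
    {
        if (!b)
            return;
        free(b->data);   /* a plain free(b) would leak this allocation */
        free(b);
    }

    int main(void)
    {
        struct buf *b = buf_alloc(256);
        buf_free(b);     /* always use the type's own destructor */
        return 0;
    }
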
diff --git a/net/sched/act_pedit.c b/net/sched/act_pedit.c
index 96c0ed115e2..6b0359a500e 100644
--- a/net/sched/act_pedit.c
+++ b/net/sched/act_pedit.c
@@ -34,7 +34,7 @@ static struct tcf_hashinfo pedit_hash_info = {
 };
 
 static const struct nla_policy pedit_policy[TCA_PEDIT_MAX + 1] = {
-	[TCA_PEDIT_PARMS]	= { .len = sizeof(struct tcf_pedit) },
+	[TCA_PEDIT_PARMS]	= { .len = sizeof(struct tc_pedit) },
 };
 
 static int tcf_pedit_init(struct nlattr *nla, struct nlattr *est,
diff --git a/net/sched/cls_api.c b/net/sched/cls_api.c
index 6a536949cdc..7cf6c0fbc7a 100644
--- a/net/sched/cls_api.c
+++ b/net/sched/cls_api.c
@@ -350,7 +350,7 @@ static int tcf_fill_node(struct sk_buff *skb, struct tcf_proto *tp,
 	tcm = NLMSG_DATA(nlh);
 	tcm->tcm_family = AF_UNSPEC;
 	tcm->tcm__pad1 = 0;
-	tcm->tcm__pad1 = 0;
+	tcm->tcm__pad2 = 0;
 	tcm->tcm_ifindex = qdisc_dev(tp->q)->ifindex;
 	tcm->tcm_parent = tp->classid;
 	tcm->tcm_info = TC_H_MAKE(tp->prio, tp->protocol);
diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c
index eddab097435..ca3c92a0a14 100644
--- a/net/wireless/nl80211.c
+++ b/net/wireless/nl80211.c
@@ -4029,7 +4029,7 @@ static int nl80211_wiphy_netns(struct sk_buff *skb, struct genl_info *info)
 	rdev = cfg80211_get_dev_from_info(info);
 	if (IS_ERR(rdev)) {
 		err = PTR_ERR(rdev);
-		goto out;
+		goto out_rtnl;
 	}
 
 	net = get_net_ns_by_pid(pid);
@@ -4049,6 +4049,7 @@ static int nl80211_wiphy_netns(struct sk_buff *skb, struct genl_info *info)
 	put_net(net);
  out:
 	cfg80211_unlock_rdev(rdev);
+ out_rtnl:
 	rtnl_unlock();
 	return err;
 }
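
The nl80211_wiphy_netns() fix above is the classic kernel unwind idiom: one label per acquired resource, released in reverse order, with each failure site jumping to the label for the last resource it actually holds. Before the patch, the early-failure path jumped to "out" and called cfg80211_unlock_rdev() on an rdev it never obtained. A minimal sketch of the pattern (lock_a/get_b and friends are hypothetical stand-ins for rtnl_lock() and cfg80211_get_dev_from_info()):

    #include <stdio.h>

    /* Hypothetical stand-ins for rtnl_lock()/rtnl_unlock() and
     * cfg80211_get_dev_from_info()/cfg80211_unlock_rdev(). */
    static void lock_a(void)   { puts("lock a"); }
    static void unlock_a(void) { puts("unlock a"); }
    static void get_b(void)    { puts("get b"); }
    static void put_b(void)    { puts("put b"); }

    static int do_op(int fail_getting_b)
    {
        int err = 0;

        lock_a();                   /* rtnl_lock() */

        if (fail_getting_b) {
            err = -1;
            goto out_a;             /* b was never acquired: skip put_b() */
        }
        get_b();                    /* cfg80211_get_dev_from_info() */

        /* ... actual work ... */

        put_b();                    /* cfg80211_unlock_rdev() */
     out_a:
        unlock_a();                 /* rtnl_unlock() */
        return err;
    }

    int main(void)
    {
        do_op(0);   /* success: both resources released, in reverse order */
        do_op(1);   /* failure: only the outer lock is released */
        return 0;
    }
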