Diffstat (limited to 'drivers/net')
-rw-r--r--  drivers/net/Kconfig        |    2
-rw-r--r--  drivers/net/mv643xx_eth.h  |   18
-rw-r--r--  drivers/net/pcnet32.c      | 4143
-rw-r--r--  drivers/net/skfp/fplustm.c |   12
-rw-r--r--  drivers/net/skge.c         |  275
-rw-r--r--  drivers/net/skge.h         |    1
-rw-r--r--  drivers/net/sky2.c         |  583
-rw-r--r--  drivers/net/sky2.h         |   22
-rw-r--r--  drivers/net/smc91x.c       |   53
-rw-r--r--  drivers/net/smc91x.h       |  474
-rw-r--r--  drivers/net/tg3.c          |   32
11 files changed, 2973 insertions, 2642 deletions
diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig
index e0b11095b9d..00993e8ba58 100644
--- a/drivers/net/Kconfig
+++ b/drivers/net/Kconfig
@@ -1914,7 +1914,7 @@ config E1000_DISABLE_PACKET_SPLIT
depends on E1000
help
Say Y here if you want to use the legacy receive path for PCI express
- hadware.
+ hardware.
If in doubt, say N.
diff --git a/drivers/net/mv643xx_eth.h b/drivers/net/mv643xx_eth.h
index 7754d1974b9..4262c1da6d4 100644
--- a/drivers/net/mv643xx_eth.h
+++ b/drivers/net/mv643xx_eth.h
@@ -42,13 +42,23 @@
#define MAX_DESCS_PER_SKB 1
#endif
+/*
+ * The MV643XX HW requires 8-byte alignment. However, when I/O
+ * is non-cache-coherent, we need to ensure that the I/O buffers
+ * we use don't share cache lines with other data.
+ */
+#if defined(CONFIG_DMA_NONCOHERENT) || defined(CONFIG_NOT_COHERENT_CACHE)
+#define ETH_DMA_ALIGN L1_CACHE_BYTES
+#else
+#define ETH_DMA_ALIGN 8
+#endif
+
#define ETH_VLAN_HLEN 4
#define ETH_FCS_LEN 4
-#define ETH_DMA_ALIGN 8 /* hw requires 8-byte alignment */
-#define ETH_HW_IP_ALIGN 2 /* hw aligns IP header */
+#define ETH_HW_IP_ALIGN 2 /* hw aligns IP header */
#define ETH_WRAPPER_LEN (ETH_HW_IP_ALIGN + ETH_HLEN + \
- ETH_VLAN_HLEN + ETH_FCS_LEN)
-#define ETH_RX_SKB_SIZE ((dev->mtu + ETH_WRAPPER_LEN + 7) & ~0x7)
+ ETH_VLAN_HLEN + ETH_FCS_LEN)
+#define ETH_RX_SKB_SIZE (dev->mtu + ETH_WRAPPER_LEN + ETH_DMA_ALIGN)
#define ETH_RX_QUEUES_ENABLED (1 << 0) /* use only Q0 for receive */
#define ETH_TX_QUEUES_ENABLED (1 << 0) /* use only Q0 for transmit */
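[Note: a minimal sketch, not part of this patch, of what the reworked macros imply for receive-buffer allocation. ETH_RX_SKB_SIZE now carries ETH_DMA_ALIGN bytes of slack instead of rounding the MTU up to 8, so a driver can push skb->data forward to the next ETH_DMA_ALIGN boundary and keep the DMA buffer off cache lines it would otherwise share on non-coherent platforms. The helper name below is hypothetical; only dev_alloc_skb() and skb_reserve() are standard kernel calls.]

    /* Hypothetical helper, for illustration only: allocate an RX skb and
     * align its data pointer to ETH_DMA_ALIGN (L1_CACHE_BYTES on
     * non-coherent systems, 8 otherwise).
     */
    static struct sk_buff *example_alloc_rx_skb(struct net_device *dev)
    {
            struct sk_buff *skb;
            unsigned long unaligned;

            /* ETH_RX_SKB_SIZE = dev->mtu + ETH_WRAPPER_LEN + ETH_DMA_ALIGN,
             * so there is always room to shift the data pointer forward. */
            skb = dev_alloc_skb(ETH_RX_SKB_SIZE);
            if (!skb)
                    return NULL;

            unaligned = (unsigned long)skb->data & (ETH_DMA_ALIGN - 1);
            if (unaligned)
                    skb_reserve(skb, ETH_DMA_ALIGN - unaligned);

            return skb;
    }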
diff --git a/drivers/net/pcnet32.c b/drivers/net/pcnet32.c
index 7e900572eaf..9595f74da93 100644
--- a/drivers/net/pcnet32.c
+++ b/drivers/net/pcnet32.c
@@ -22,12 +22,12 @@
*************************************************************************/
#define DRV_NAME "pcnet32"
-#define DRV_VERSION "1.31c"
-#define DRV_RELDATE "01.Nov.2005"
+#define DRV_VERSION "1.32"
+#define DRV_RELDATE "18.Mar.2006"
#define PFX DRV_NAME ": "
-static const char * const version =
-DRV_NAME ".c:v" DRV_VERSION " " DRV_RELDATE " tsbogend@alpha.franken.de\n";
+static const char *const version =
+ DRV_NAME ".c:v" DRV_VERSION " " DRV_RELDATE " tsbogend@alpha.franken.de\n";
#include <linux/module.h>
#include <linux/kernel.h>
@@ -58,18 +58,23 @@ DRV_NAME ".c:v" DRV_VERSION " " DRV_RELDATE " tsbogend@alpha.franken.de\n";
* PCI device identifiers for "new style" Linux PCI Device Drivers
*/
static struct pci_device_id pcnet32_pci_tbl[] = {
- { PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_LANCE_HOME, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
- { PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_LANCE, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
- /*
- * Adapters that were sold with IBM's RS/6000 or pSeries hardware have
- * the incorrect vendor id.
- */
- { PCI_VENDOR_ID_TRIDENT, PCI_DEVICE_ID_AMD_LANCE, PCI_ANY_ID, PCI_ANY_ID,
- PCI_CLASS_NETWORK_ETHERNET << 8, 0xffff00, 0 },
- { 0, }
+ { PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_LANCE_HOME,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
+ { PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_LANCE,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
+
+ /*
+ * Adapters that were sold with IBM's RS/6000 or pSeries hardware have
+ * the incorrect vendor id.
+ */
+ { PCI_VENDOR_ID_TRIDENT, PCI_DEVICE_ID_AMD_LANCE,
+ PCI_ANY_ID, PCI_ANY_ID,
+ PCI_CLASS_NETWORK_ETHERNET << 8, 0xffff00, 0},
+
+ { } /* terminate list */
};
-MODULE_DEVICE_TABLE (pci, pcnet32_pci_tbl);
+MODULE_DEVICE_TABLE(pci, pcnet32_pci_tbl);
static int cards_found;
@@ -77,13 +82,11 @@ static int cards_found;
* VLB I/O addresses
*/
static unsigned int pcnet32_portlist[] __initdata =
- { 0x300, 0x320, 0x340, 0x360, 0 };
-
-
+ { 0x300, 0x320, 0x340, 0x360, 0 };
static int pcnet32_debug = 0;
-static int tx_start = 1; /* Mapping -- 0:20, 1:64, 2:128, 3:~220 (depends on chip vers) */
-static int pcnet32vlb; /* check for VLB cards ? */
+static int tx_start = 1; /* Mapping -- 0:20, 1:64, 2:128, 3:~220 (depends on chip vers) */
+static int pcnet32vlb; /* check for VLB cards ? */
static struct net_device *pcnet32_dev;
@@ -110,32 +113,34 @@ static int rx_copybreak = 200;
* to internal options
*/
static const unsigned char options_mapping[] = {
- PCNET32_PORT_ASEL, /* 0 Auto-select */
- PCNET32_PORT_AUI, /* 1 BNC/AUI */
- PCNET32_PORT_AUI, /* 2 AUI/BNC */
- PCNET32_PORT_ASEL, /* 3 not supported */
- PCNET32_PORT_10BT | PCNET32_PORT_FD, /* 4 10baseT-FD */
- PCNET32_PORT_ASEL, /* 5 not supported */
- PCNET32_PORT_ASEL, /* 6 not supported */
- PCNET32_PORT_ASEL, /* 7 not supported */
- PCNET32_PORT_ASEL, /* 8 not supported */
- PCNET32_PORT_MII, /* 9 MII 10baseT */
- PCNET32_PORT_MII | PCNET32_PORT_FD, /* 10 MII 10baseT-FD */
- PCNET32_PORT_MII, /* 11 MII (autosel) */
- PCNET32_PORT_10BT, /* 12 10BaseT */
- PCNET32_PORT_MII | PCNET32_PORT_100, /* 13 MII 100BaseTx */
- PCNET32_PORT_MII | PCNET32_PORT_100 | PCNET32_PORT_FD, /* 14 MII 100BaseTx-FD */
- PCNET32_PORT_ASEL /* 15 not supported */
+ PCNET32_PORT_ASEL, /* 0 Auto-select */
+ PCNET32_PORT_AUI, /* 1 BNC/AUI */
+ PCNET32_PORT_AUI, /* 2 AUI/BNC */
+ PCNET32_PORT_ASEL, /* 3 not supported */
+ PCNET32_PORT_10BT | PCNET32_PORT_FD, /* 4 10baseT-FD */
+ PCNET32_PORT_ASEL, /* 5 not supported */
+ PCNET32_PORT_ASEL, /* 6 not supported */
+ PCNET32_PORT_ASEL, /* 7 not supported */
+ PCNET32_PORT_ASEL, /* 8 not supported */
+ PCNET32_PORT_MII, /* 9 MII 10baseT */
+ PCNET32_PORT_MII | PCNET32_PORT_FD, /* 10 MII 10baseT-FD */
+ PCNET32_PORT_MII, /* 11 MII (autosel) */
+ PCNET32_PORT_10BT, /* 12 10BaseT */
+ PCNET32_PORT_MII | PCNET32_PORT_100, /* 13 MII 100BaseTx */
+ /* 14 MII 100BaseTx-FD */
+ PCNET32_PORT_MII | PCNET32_PORT_100 | PCNET32_PORT_FD,
+ PCNET32_PORT_ASEL /* 15 not supported */
};
static const char pcnet32_gstrings_test[][ETH_GSTRING_LEN] = {
- "Loopback test (offline)"
+ "Loopback test (offline)"
};
+
#define PCNET32_TEST_LEN (sizeof(pcnet32_gstrings_test) / ETH_GSTRING_LEN)
-#define PCNET32_NUM_REGS 168
+#define PCNET32_NUM_REGS 136
-#define MAX_UNITS 8 /* More are supported, limit only on options */
+#define MAX_UNITS 8 /* More are supported, limit only on options */
static int options[MAX_UNITS];
static int full_duplex[MAX_UNITS];
static int homepna[MAX_UNITS];
@@ -151,124 +156,6 @@ static int homepna[MAX_UNITS];
*/
/*
- * History:
- * v0.01: Initial version
- * only tested on Alpha Noname Board
- * v0.02: changed IRQ handling for new interrupt scheme (dev_id)
- * tested on a ASUS SP3G
- * v0.10: fixed an odd problem with the 79C974 in a Compaq Deskpro XL
- * looks like the 974 doesn't like stopping and restarting in a
- * short period of time; now we do a reinit of the lance; the
- * bug was triggered by doing ifconfig eth0 <ip> broadcast <addr>
- * and hangs the machine (thanks to Klaus Liedl for debugging)
- * v0.12: by suggestion from Donald Becker: Renamed driver to pcnet32,
- * made it standalone (no need for lance.c)
- * v0.13: added additional PCI detecting for special PCI devices (Compaq)
- * v0.14: stripped down additional PCI probe (thanks to David C Niemi
- * and sveneric@xs4all.nl for testing this on their Compaq boxes)
- * v0.15: added 79C965 (VLB) probe
- * added interrupt sharing for PCI chips
- * v0.16: fixed set_multicast_list on Alpha machines
- * v0.17: removed hack from dev.c; now pcnet32 uses ethif_probe in Space.c
- * v0.19: changed setting of autoselect bit
- * v0.20: removed additional Compaq PCI probe; there is now a working one
- * in arch/i386/bios32.c
- * v0.21: added endian conversion for ppc, from work by cort@cs.nmt.edu
- * v0.22: added printing of status to ring dump
- * v0.23: changed enet_statistics to net_devive_stats
- * v0.90: added multicast filter
- * added module support
- * changed irq probe to new style
- * added PCnetFast chip id
- * added fix for receive stalls with Intel saturn chipsets
- * added in-place rx skbs like in the tulip driver
- * minor cleanups
- * v0.91: added PCnetFast+ chip id
- * back port to 2.0.x
- * v1.00: added some stuff from Donald Becker's 2.0.34 version
- * added support for byte counters in net_dev_stats
- * v1.01: do ring dumps, only when debugging the driver
- * increased the transmit timeout
- * v1.02: fixed memory leak in pcnet32_init_ring()
- * v1.10: workaround for stopped transmitter
- * added port selection for modules
- * detect special T1/E1 WAN card and setup port selection
- * v1.11: fixed wrong checking of Tx errors
- * v1.20: added check of return value kmalloc (cpeterso@cs.washington.edu)
- * added save original kmalloc addr for freeing (mcr@solidum.com)
- * added support for PCnetHome chip (joe@MIT.EDU)
- * rewritten PCI card detection
- * added dwio mode to get driver working on some PPC machines
- * v1.21: added mii selection and mii ioctl
- * v1.22: changed pci scanning code to make PPC people happy
- * fixed switching to 32bit mode in pcnet32_open() (thanks
- * to Michael Richard <mcr@solidum.com> for noticing this one)
- * added sub vendor/device id matching (thanks again to
- * Michael Richard <mcr@solidum.com>)
- * added chip id for 79c973/975 (thanks to Zach Brown <zab@zabbo.net>)
- * v1.23 fixed small bug, when manual selecting MII speed/duplex
- * v1.24 Applied Thomas' patch to use TxStartPoint and thus decrease TxFIFO
- * underflows. Added tx_start_pt module parameter. Increased
- * TX_RING_SIZE from 16 to 32. Added #ifdef'd code to use DXSUFLO
- * for FAST[+] chipsets. <kaf@fc.hp.com>
- * v1.24ac Added SMP spinlocking - Alan Cox <alan@redhat.com>
- * v1.25kf Added No Interrupt on successful Tx for some Tx's <kaf@fc.hp.com>
- * v1.26 Converted to pci_alloc_consistent, Jamey Hicks / George France
- * <jamey@crl.dec.com>
- * - Fixed a few bugs, related to running the controller in 32bit mode.
- * 23 Oct, 2000. Carsten Langgaard, carstenl@mips.com
- * Copyright (C) 2000 MIPS Technologies, Inc. All rights reserved.
- * v1.26p Fix oops on rmmod+insmod; plug i/o resource leak - Paul Gortmaker
- * v1.27 improved CSR/PROM address detection, lots of cleanups,
- * new pcnet32vlb module option, HP-PARISC support,
- * added module parameter descriptions,
- * initial ethtool support - Helge Deller <deller@gmx.de>
- * v1.27a Sun Feb 10 2002 Go Taniguchi <go@turbolinux.co.jp>
- * use alloc_etherdev and register_netdev
- * fix pci probe not increment cards_found
- * FD auto negotiate error workaround for xSeries250
- * clean up and using new mii module
- * v1.27b Sep 30 2002 Kent Yoder <yoder1@us.ibm.com>
- * Added timer for cable connection state changes.
- * v1.28 20 Feb 2004 Don Fry <brazilnut@us.ibm.com>
- * Jon Mason <jonmason@us.ibm.com>, Chinmay Albal <albal@in.ibm.com>
- * Now uses ethtool_ops, netif_msg_* and generic_mii_ioctl.
- * Fixes bogus 'Bus master arbitration failure', pci_[un]map_single
- * length errors, and transmit hangs. Cleans up after errors in open.
- * Jim Lewis <jklewis@us.ibm.com> added ethernet loopback test.
- * Thomas Munck Steenholdt <tmus@tmus.dk> non-mii ioctl corrections.
- * v1.29 6 Apr 2004 Jim Lewis <jklewis@us.ibm.com> added physical
- * identification code (blink led's) and register dump.
- * Don Fry added timer for 971/972 so skbufs don't remain on tx ring
- * forever.
- * v1.30 18 May 2004 Don Fry removed timer and Last Transmit Interrupt
- * (ltint) as they added complexity and didn't give good throughput.
- * v1.30a 22 May 2004 Don Fry limit frames received during interrupt.
- * v1.30b 24 May 2004 Don Fry fix bogus tx carrier errors with 79c973,
- * assisted by Bruce Penrod <bmpenrod@endruntechnologies.com>.
- * v1.30c 25 May 2004 Don Fry added netif_wake_queue after pcnet32_restart.
- * v1.30d 01 Jun 2004 Don Fry discard oversize rx packets.
- * v1.30e 11 Jun 2004 Don Fry recover after fifo error and rx hang.
- * v1.30f 16 Jun 2004 Don Fry cleanup IRQ to allow 0 and 1 for PCI,
- * expanding on suggestions from Ralf Baechle <ralf@linux-mips.org>,
- * and Brian Murphy <brian@murphy.dk>.
- * v1.30g 22 Jun 2004 Patrick Simmons <psimmons@flash.net> added option
- * homepna for selecting HomePNA mode for PCNet/Home 79C978.
- * v1.30h 24 Jun 2004 Don Fry correctly select auto, speed, duplex in bcr32.
- * v1.30i 28 Jun 2004 Don Fry change to use module_param.
- * v1.30j 29 Apr 2005 Don Fry fix skb/map leak with loopback test.
- * v1.31 02 Sep 2005 Hubert WS Lin <wslin@tw.ibm.c0m> added set_ringparam().
- * v1.31a 12 Sep 2005 Hubert WS Lin <wslin@tw.ibm.c0m> set min ring size to 4
- * to allow loopback test to work unchanged.
- * v1.31b 06 Oct 2005 Don Fry changed alloc_ring to show name of device
- * if allocation fails
- * v1.31c 01 Nov 2005 Don Fry Allied Telesyn 2700/2701 FX are 100Mbit only.
- * Force 100Mbit FD if Auto (ASEL) is selected.
- * See Bugzilla 2669 and 4551.
- */
-
-
-/*
* Set the number of Tx and Rx buffers, using Log_2(# buffers).
* Reasonable default values are 4 Tx buffers, and 16 Rx buffers.
* That translates to 2 (4 == 2^^2) and 4 (16 == 2^^4).
@@ -303,42 +190,42 @@ static int homepna[MAX_UNITS];
/* The PCNET32 Rx and Tx ring descriptors. */
struct pcnet32_rx_head {
- u32 base;
- s16 buf_length;
- s16 status;
- u32 msg_length;
- u32 reserved;
+ u32 base;
+ s16 buf_length;
+ s16 status;
+ u32 msg_length;
+ u32 reserved;
};
struct pcnet32_tx_head {
- u32 base;
- s16 length;
- s16 status;
- u32 misc;
- u32 reserved;
+ u32 base;
+ s16 length;
+ s16 status;
+ u32 misc;
+ u32 reserved;
};
/* The PCNET32 32-Bit initialization block, described in databook. */
struct pcnet32_init_block {
- u16 mode;
- u16 tlen_rlen;
- u8 phys_addr[6];
- u16 reserved;
- u32 filter[2];
- /* Receive and transmit ring base, along with extra bits. */
- u32 rx_ring;
- u32 tx_ring;
+ u16 mode;
+ u16 tlen_rlen;
+ u8 phys_addr[6];
+ u16 reserved;
+ u32 filter[2];
+ /* Receive and transmit ring base, along with extra bits. */
+ u32 rx_ring;
+ u32 tx_ring;
};
/* PCnet32 access functions */
struct pcnet32_access {
- u16 (*read_csr)(unsigned long, int);
- void (*write_csr)(unsigned long, int, u16);
- u16 (*read_bcr)(unsigned long, int);
- void (*write_bcr)(unsigned long, int, u16);
- u16 (*read_rap)(unsigned long);
- void (*write_rap)(unsigned long, u16);
- void (*reset)(unsigned long);
+ u16 (*read_csr) (unsigned long, int);
+ void (*write_csr) (unsigned long, int, u16);
+ u16 (*read_bcr) (unsigned long, int);
+ void (*write_bcr) (unsigned long, int, u16);
+ u16 (*read_rap) (unsigned long);
+ void (*write_rap) (unsigned long, u16);
+ void (*reset) (unsigned long);
};
/*
@@ -346,760 +233,794 @@ struct pcnet32_access {
* so the structure should be allocated using pci_alloc_consistent().
*/
struct pcnet32_private {
- struct pcnet32_init_block init_block;
- /* The Tx and Rx ring entries must be aligned on 16-byte boundaries in 32bit mode. */
- struct pcnet32_rx_head *rx_ring;
- struct pcnet32_tx_head *tx_ring;
- dma_addr_t dma_addr; /* DMA address of beginning of this
- object, returned by
- pci_alloc_consistent */
- struct pci_dev *pci_dev; /* Pointer to the associated pci device
- structure */
- const char *name;
- /* The saved address of a sent-in-place packet/buffer, for skfree(). */
- struct sk_buff **tx_skbuff;
- struct sk_buff **rx_skbuff;
- dma_addr_t *tx_dma_addr;
- dma_addr_t *rx_dma_addr;
- struct pcnet32_access a;
- spinlock_t lock; /* Guard lock */
- unsigned int cur_rx, cur_tx; /* The next free ring entry */
- unsigned int rx_ring_size; /* current rx ring size */
- unsigned int tx_ring_size; /* current tx ring size */
- unsigned int rx_mod_mask; /* rx ring modular mask */
- unsigned int tx_mod_mask; /* tx ring modular mask */
- unsigned short rx_len_bits;
- unsigned short tx_len_bits;
- dma_addr_t rx_ring_dma_addr;
- dma_addr_t tx_ring_dma_addr;
- unsigned int dirty_rx, dirty_tx; /* The ring entries to be free()ed. */
- struct net_device_stats stats;
- char tx_full;
- int options;
- unsigned int shared_irq:1, /* shared irq possible */
- dxsuflo:1, /* disable transmit stop on uflo */
- mii:1; /* mii port available */
- struct net_device *next;
- struct mii_if_info mii_if;
- struct timer_list watchdog_timer;
- struct timer_list blink_timer;
- u32 msg_enable; /* debug message level */
+ struct pcnet32_init_block init_block;
+ /* The Tx and Rx ring entries must be aligned on 16-byte boundaries in 32bit mode. */
+ struct pcnet32_rx_head *rx_ring;
+ struct pcnet32_tx_head *tx_ring;
+ dma_addr_t dma_addr;/* DMA address of beginning of this
+ object, returned by pci_alloc_consistent */
+ struct pci_dev *pci_dev;
+ const char *name;
+ /* The saved address of a sent-in-place packet/buffer, for skfree(). */
+ struct sk_buff **tx_skbuff;
+ struct sk_buff **rx_skbuff;
+ dma_addr_t *tx_dma_addr;
+ dma_addr_t *rx_dma_addr;
+ struct pcnet32_access a;
+ spinlock_t lock; /* Guard lock */
+ unsigned int cur_rx, cur_tx; /* The next free ring entry */
+ unsigned int rx_ring_size; /* current rx ring size */
+ unsigned int tx_ring_size; /* current tx ring size */
+ unsigned int rx_mod_mask; /* rx ring modular mask */
+ unsigned int tx_mod_mask; /* tx ring modular mask */
+ unsigned short rx_len_bits;
+ unsigned short tx_len_bits;
+ dma_addr_t rx_ring_dma_addr;
+ dma_addr_t tx_ring_dma_addr;
+ unsigned int dirty_rx, /* ring entries to be freed. */
+ dirty_tx;
+
+ struct net_device_stats stats;
+ char tx_full;
+ char phycount; /* number of phys found */
+ int options;
+ unsigned int shared_irq:1, /* shared irq possible */
+ dxsuflo:1, /* disable transmit stop on uflo */
+ mii:1; /* mii port available */
+ struct net_device *next;
+ struct mii_if_info mii_if;
+ struct timer_list watchdog_timer;
+ struct timer_list blink_timer;
+ u32 msg_enable; /* debug message level */
+
+ /* each bit indicates an available PHY */
+ u32 phymask;
};
static void pcnet32_probe_vlbus(void);
-static int pcnet32_probe_pci(struct pci_dev *, const struct pci_device_id *);
-static int pcnet32_probe1(unsigned long, int, struct pci_dev *);
-static int pcnet32_open(struct net_device *);
-static int pcnet32_init_ring(struct net_device *);
-static int pcnet32_start_xmit(struct sk_buff *, struct net_device *);
-static int pcnet32_rx(struct net_device *);
-static void pcnet32_tx_timeout (struct net_device *dev);
+static int pcnet32_probe_pci(struct pci_dev *, const struct pci_device_id *);
+static int pcnet32_probe1(unsigned long, int, struct pci_dev *);
+static int pcnet32_open(struct net_device *);
+static int pcnet32_init_ring(struct net_device *);
+static int pcnet32_start_xmit(struct sk_buff *, struct net_device *);
+static int pcnet32_rx(struct net_device *);
+static void pcnet32_tx_timeout(struct net_device *dev);
static irqreturn_t pcnet32_interrupt(int, void *, struct pt_regs *);
-static int pcnet32_close(struct net_device *);
+static int pcnet32_close(struct net_device *);
static struct net_device_stats *pcnet32_get_stats(struct net_device *);
static void pcnet32_load_multicast(struct net_device *dev);
static void pcnet32_set_multicast_list(struct net_device *);
-static int pcnet32_ioctl(struct net_device *, struct ifreq *, int);
+static int pcnet32_ioctl(struct net_device *, struct ifreq *, int);
static void pcnet32_watchdog(struct net_device *);
static int mdio_read(struct net_device *dev, int phy_id, int reg_num);
-static void mdio_write(struct net_device *dev, int phy_id, int reg_num, int val);
+static void mdio_write(struct net_device *dev, int phy_id, int reg_num,
+ int val);
static void pcnet32_restart(struct net_device *dev, unsigned int csr0_bits);
static void pcnet32_ethtool_test(struct net_device *dev,
- struct ethtool_test *eth_test, u64 *data);
-static int pcnet32_loopback_test(struct net_device *dev, uint64_t *data1);
+ struct ethtool_test *eth_test, u64 * data);
+static int pcnet32_loopback_test(struct net_device *dev, uint64_t * data1);
static int pcnet32_phys_id(struct net_device *dev, u32 data);
static void pcnet32_led_blink_callback(struct net_device *dev);
static int pcnet32_get_regs_len(struct net_device *dev);
static void pcnet32_get_regs(struct net_device *dev, struct ethtool_regs *regs,
- void *ptr);
+ void *ptr);
static void pcnet32_purge_tx_ring(struct net_device *dev);
static int pcnet32_alloc_ring(struct net_device *dev, char *name);
static void pcnet32_free_ring(struct net_device *dev);
-
+static void pcnet32_check_media(struct net_device *dev, int verbose);
enum pci_flags_bit {
- PCI_USES_IO=1, PCI_USES_MEM=2, PCI_USES_MASTER=4,
- PCI_ADDR0=0x10<<0, PCI_ADDR1=0x10<<1, PCI_ADDR2=0x10<<2, PCI_ADDR3=0x10<<3,
+ PCI_USES_IO = 1, PCI_USES_MEM = 2, PCI_USES_MASTER = 4,
+ PCI_ADDR0 = 0x10 << 0, PCI_ADDR1 = 0x10 << 1, PCI_ADDR2 =
+ 0x10 << 2, PCI_ADDR3 = 0x10 << 3,
};
-
-static u16 pcnet32_wio_read_csr (unsigned long addr, int index)
+static u16 pcnet32_wio_read_csr(unsigned long addr, int index)
{
- outw (index, addr+PCNET32_WIO_RAP);
- return inw (addr+PCNET32_WIO_RDP);
+ outw(index, addr + PCNET32_WIO_RAP);
+ return inw(addr + PCNET32_WIO_RDP);
}
-static void pcnet32_wio_write_csr (unsigned long addr, int index, u16 val)
+static void pcnet32_wio_write_csr(unsigned long addr, int index, u16 val)
{
- outw (index, addr+PCNET32_WIO_RAP);
- outw (val, addr+PCNET32_WIO_RDP);
+ outw(index, addr + PCNET32_WIO_RAP);
+ outw(val, addr + PCNET32_WIO_RDP);
}
-static u16 pcnet32_wio_read_bcr (unsigned long addr, int index)
+static u16 pcnet32_wio_read_bcr(unsigned long addr, int index)
{
- outw (index, addr+PCNET32_WIO_RAP);
- return inw (addr+PCNET32_WIO_BDP);
+ outw(index, addr + PCNET32_WIO_RAP);
+ return inw(addr + PCNET32_WIO_BDP);
}
-static void pcnet32_wio_write_bcr (unsigned long addr, int index, u16 val)
+static void pcnet32_wio_write_bcr(unsigned long addr, int index, u16 val)
{
- outw (index, addr+PCNET32_WIO_RAP);
- outw (val, addr+PCNET32_WIO_BDP);
+ outw(index, addr + PCNET32_WIO_RAP);
+ outw(val, addr + PCNET32_WIO_BDP);
}
-static u16 pcnet32_wio_read_rap (unsigned long addr)
+static u16 pcnet32_wio_read_rap(unsigned long addr)
{
- return inw (addr+PCNET32_WIO_RAP);
+ return inw(addr + PCNET32_WIO_RAP);
}
-static void pcnet32_wio_write_rap (unsigned long addr, u16 val)
+static void pcnet32_wio_write_rap(unsigned long addr, u16 val)
{
- outw (val, addr+PCNET32_WIO_RAP);
+ outw(val, addr + PCNET32_WIO_RAP);
}
-static void pcnet32_wio_reset (unsigned long addr)
+static void pcnet32_wio_reset(unsigned long addr)
{
- inw (addr+PCNET32_WIO_RESET);
+ inw(addr + PCNET32_WIO_RESET);
}
-static int pcnet32_wio_check (unsigned long addr)
+static int pcnet32_wio_check(unsigned long addr)
{
- outw (88, addr+PCNET32_WIO_RAP);
- return (inw (addr+PCNET32_WIO_RAP) == 88);
+ outw(88, addr + PCNET32_WIO_RAP);
+ return (inw(addr + PCNET32_WIO_RAP) == 88);
}
static struct pcnet32_access pcnet32_wio = {
- .read_csr = pcnet32_wio_read_csr,
- .write_csr = pcnet32_wio_write_csr,
- .read_bcr = pcnet32_wio_read_bcr,
- .write_bcr = pcnet32_wio_write_bcr,
- .read_rap = pcnet32_wio_read_rap,
- .write_rap = pcnet32_wio_write_rap,
- .reset = pcnet32_wio_reset
+ .read_csr = pcnet32_wio_read_csr,
+ .write_csr = pcnet32_wio_write_csr,
+ .read_bcr = pcnet32_wio_read_bcr,
+ .write_bcr = pcnet32_wio_write_bcr,
+ .read_rap = pcnet32_wio_read_rap,
+ .write_rap = pcnet32_wio_write_rap,
+ .reset = pcnet32_wio_reset
};
-static u16 pcnet32_dwio_read_csr (unsigned long addr, int index)
+static u16 pcnet32_dwio_read_csr(unsigned long addr, int index)
{
- outl (index, addr+PCNET32_DWIO_RAP);
- return (inl (addr+PCNET32_DWIO_RDP) & 0xffff);
+ outl(index, addr + PCNET32_DWIO_RAP);
+ return (inl(addr + PCNET32_DWIO_RDP) & 0xffff);
}
-static void pcnet32_dwio_write_csr (unsigned long addr, int index, u16 val)
+static void pcnet32_dwio_write_csr(unsigned long addr, int index, u16 val)
{
- outl (index, addr+PCNET32_DWIO_RAP);
- outl (val, addr+PCNET32_DWIO_RDP);
+ outl(index, addr + PCNET32_DWIO_RAP);
+ outl(val, addr + PCNET32_DWIO_RDP);
}
-static u16 pcnet32_dwio_read_bcr (unsigned long addr, int index)
+static u16 pcnet32_dwio_read_bcr(unsigned long addr, int index)
{
- outl (index, addr+PCNET32_DWIO_RAP);
- return (inl (addr+PCNET32_DWIO_BDP) & 0xffff);
+ outl(index, addr + PCNET32_DWIO_RAP);
+ return (inl(addr + PCNET32_DWIO_BDP) & 0xffff);
}
-static void pcnet32_dwio_write_bcr (unsigned long addr, int index, u16 val)
+static void pcnet32_dwio_write_bcr(unsigned long addr, int index, u16 val)
{
- outl (index, addr+PCNET32_DWIO_RAP);
- outl (val, addr+PCNET32_DWIO_BDP);
+ outl(index, addr + PCNET32_DWIO_RAP);
+ outl(val, addr + PCNET32_DWIO_BDP);
}
-static u16 pcnet32_dwio_read_rap (unsigned long addr)
+static u16 pcnet32_dwio_read_rap(unsigned long addr)
{
- return (inl (addr+PCNET32_DWIO_RAP) & 0xffff);
+ return (inl(addr + PCNET32_DWIO_RAP) & 0xffff);
}
-static void pcnet32_dwio_write_rap (unsigned long addr, u16 val)
+static void pcnet32_dwio_write_rap(unsigned long addr, u16 val)
{
- outl (val, addr+PCNET32_DWIO_RAP);
+ outl(val, addr + PCNET32_DWIO_RAP);
}
-static void pcnet32_dwio_reset (unsigned long addr)
+static void pcnet32_dwio_reset(unsigned long addr)
{
- inl (addr+PCNET32_DWIO_RESET);
+ inl(addr + PCNET32_DWIO_RESET);
}
-static int pcnet32_dwio_check (unsigned long addr)
+static int pcnet32_dwio_check(unsigned long addr)
{
- outl (88, addr+PCNET32_DWIO_RAP);
- return ((inl (addr+PCNET32_DWIO_RAP) & 0xffff) == 88);
+ outl(88, addr + PCNET32_DWIO_RAP);
+ return ((inl(addr + PCNET32_DWIO_RAP) & 0xffff) == 88);
}
static struct pcnet32_access pcnet32_dwio = {
- .read_csr = pcnet32_dwio_read_csr,
- .write_csr = pcnet32_dwio_write_csr,
- .read_bcr = pcnet32_dwio_read_bcr,
- .write_bcr = pcnet32_dwio_write_bcr,
- .read_rap = pcnet32_dwio_read_rap,
- .write_rap = pcnet32_dwio_write_rap,
- .reset = pcnet32_dwio_reset
+ .read_csr = pcnet32_dwio_read_csr,
+ .write_csr = pcnet32_dwio_write_csr,
+ .read_bcr = pcnet32_dwio_read_bcr,
+ .write_bcr = pcnet32_dwio_write_bcr,
+ .read_rap = pcnet32_dwio_read_rap,
+ .write_rap = pcnet32_dwio_write_rap,
+ .reset = pcnet32_dwio_reset
};
#ifdef CONFIG_NET_POLL_CONTROLLER
static void pcnet32_poll_controller(struct net_device *dev)
{
- disable_irq(dev->irq);
- pcnet32_interrupt(0, dev, NULL);
- enable_irq(dev->irq);
+ disable_irq(dev->irq);
+ pcnet32_interrupt(0, dev, NULL);
+ enable_irq(dev->irq);
}
#endif
-
static int pcnet32_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
- struct pcnet32_private *lp = dev->priv;
- unsigned long flags;
- int r = -EOPNOTSUPP;
-
- if (lp->mii) {
- spin_lock_irqsave(&lp->lock, flags);
- mii_ethtool_gset(&lp->mii_if, cmd);
- spin_unlock_irqrestore(&lp->lock, flags);
- r = 0;
- }
- return r;
+ struct pcnet32_private *lp = dev->priv;
+ unsigned long flags;
+ int r = -EOPNOTSUPP;
+
+ if (lp->mii) {
+ spin_lock_irqsave(&lp->lock, flags);
+ mii_ethtool_gset(&lp->mii_if, cmd);
+ spin_unlock_irqrestore(&lp->lock, flags);
+ r = 0;
+ }
+ return r;
}
static int pcnet32_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
- struct pcnet32_private *lp = dev->priv;
- unsigned long flags;
- int r = -EOPNOTSUPP;
+ struct pcnet32_private *lp = dev->priv;
+ unsigned long flags;
+ int r = -EOPNOTSUPP;
- if (lp->mii) {
- spin_lock_irqsave(&lp->lock, flags);
- r = mii_ethtool_sset(&lp->mii_if, cmd);
- spin_unlock_irqrestore(&lp->lock, flags);
- }
- return r;
+ if (lp->mii) {
+ spin_lock_irqsave(&lp->lock, flags);
+ r = mii_ethtool_sset(&lp->mii_if, cmd);
+ spin_unlock_irqrestore(&lp->lock, flags);
+ }
+ return r;
}
-static void pcnet32_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
+static void pcnet32_get_drvinfo(struct net_device *dev,
+ struct ethtool_drvinfo *info)
{
- struct pcnet32_private *lp = dev->priv;
-
- strcpy (info->driver, DRV_NAME);
- strcpy (info->version, DRV_VERSION);
- if (lp->pci_dev)
- strcpy (info->bus_info, pci_name(lp->pci_dev));
- else
- sprintf(info->bus_info, "VLB 0x%lx", dev->base_addr);
+ struct pcnet32_private *lp = dev->priv;
+
+ strcpy(info->driver, DRV_NAME);
+ strcpy(info->version, DRV_VERSION);
+ if (lp->pci_dev)
+ strcpy(info->bus_info, pci_name(lp->pci_dev));
+ else
+ sprintf(info->bus_info, "VLB 0x%lx", dev->base_addr);
}
static u32 pcnet32_get_link(struct net_device *dev)
{
- struct pcnet32_private *lp = dev->priv;
- unsigned long flags;
- int r;
-
- spin_lock_irqsave(&lp->lock, flags);
- if (lp->mii) {
- r = mii_link_ok(&lp->mii_if);
- } else {
- ulong ioaddr = dev->base_addr; /* card base I/O address */
- r = (lp->a.read_bcr(ioaddr, 4) != 0xc0);
- }
- spin_unlock_irqrestore(&lp->lock, flags);
+ struct pcnet32_private *lp = dev->priv;
+ unsigned long flags;
+ int r;
- return r;
+ spin_lock_irqsave(&lp->lock, flags);
+ if (lp->mii) {
+ r = mii_link_ok(&lp->mii_if);
+ } else {
+ ulong ioaddr = dev->base_addr; /* card base I/O address */
+ r = (lp->a.read_bcr(ioaddr, 4) != 0xc0);
+ }
+ spin_unlock_irqrestore(&lp->lock, flags);
+
+ return r;
}
static u32 pcnet32_get_msglevel(struct net_device *dev)
{
- struct pcnet32_private *lp = dev->priv;
- return lp->msg_enable;
+ struct pcnet32_private *lp = dev->priv;
+ return lp->msg_enable;
}
static void pcnet32_set_msglevel(struct net_device *dev, u32 value)
{
- struct pcnet32_private *lp = dev->priv;
- lp->msg_enable = value;
+ struct pcnet32_private *lp = dev->priv;
+ lp->msg_enable = value;
}
static int pcnet32_nway_reset(struct net_device *dev)
{
- struct pcnet32_private *lp = dev->priv;
- unsigned long flags;
- int r = -EOPNOTSUPP;
+ struct pcnet32_private *lp = dev->priv;
+ unsigned long flags;
+ int r = -EOPNOTSUPP;
- if (lp->mii) {
- spin_lock_irqsave(&lp->lock, flags);
- r = mii_nway_restart(&lp->mii_if);
- spin_unlock_irqrestore(&lp->lock, flags);
- }
- return r;
+ if (lp->mii) {
+ spin_lock_irqsave(&lp->lock, flags);
+ r = mii_nway_restart(&lp->mii_if);
+ spin_unlock_irqrestore(&lp->lock, flags);
+ }
+ return r;
}
-static void pcnet32_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
+static void pcnet32_get_ringparam(struct net_device *dev,
+ struct ethtool_ringparam *ering)
{
- struct pcnet32_private *lp = dev->priv;
+ struct pcnet32_private *lp = dev->priv;
- ering->tx_max_pending = TX_MAX_RING_SIZE - 1;
- ering->tx_pending = lp->tx_ring_size - 1;
- ering->rx_max_pending = RX_MAX_RING_SIZE - 1;
- ering->rx_pending = lp->rx_ring_size - 1;
+ ering->tx_max_pending = TX_MAX_RING_SIZE - 1;
+ ering->tx_pending = lp->tx_ring_size - 1;
+ ering->rx_max_pending = RX_MAX_RING_SIZE - 1;
+ ering->rx_pending = lp->rx_ring_size - 1;
}
-static int pcnet32_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
+static int pcnet32_set_ringparam(struct net_device *dev,
+ struct ethtool_ringparam *ering)
{
- struct pcnet32_private *lp = dev->priv;
- unsigned long flags;
- int i;
-
- if (ering->rx_mini_pending || ering->rx_jumbo_pending)
- return -EINVAL;
-
- if (netif_running(dev))
- pcnet32_close(dev);
-
- spin_lock_irqsave(&lp->lock, flags);
- pcnet32_free_ring(dev);
- lp->tx_ring_size = min(ering->tx_pending, (unsigned int) TX_MAX_RING_SIZE);
- lp->rx_ring_size = min(ering->rx_pending, (unsigned int) RX_MAX_RING_SIZE);
-
- /* set the minimum ring size to 4, to allow the loopback test to work
- * unchanged.
- */
- for (i = 2; i <= PCNET32_LOG_MAX_TX_BUFFERS; i++) {
- if (lp->tx_ring_size <= (1 << i))
- break;
- }
- lp->tx_ring_size = (1 << i);
- lp->tx_mod_mask = lp->tx_ring_size - 1;
- lp->tx_len_bits = (i << 12);
-
- for (i = 2; i <= PCNET32_LOG_MAX_RX_BUFFERS; i++) {
- if (lp->rx_ring_size <= (1 << i))
- break;
- }
- lp->rx_ring_size = (1 << i);
- lp->rx_mod_mask = lp->rx_ring_size - 1;
- lp->rx_len_bits = (i << 4);
-
- if (pcnet32_alloc_ring(dev, dev->name)) {
+ struct pcnet32_private *lp = dev->priv;
+ unsigned long flags;
+ int i;
+
+ if (ering->rx_mini_pending || ering->rx_jumbo_pending)
+ return -EINVAL;
+
+ if (netif_running(dev))
+ pcnet32_close(dev);
+
+ spin_lock_irqsave(&lp->lock, flags);
pcnet32_free_ring(dev);
- spin_unlock_irqrestore(&lp->lock, flags);
- return -ENOMEM;
- }
+ lp->tx_ring_size =
+ min(ering->tx_pending, (unsigned int)TX_MAX_RING_SIZE);
+ lp->rx_ring_size =
+ min(ering->rx_pending, (unsigned int)RX_MAX_RING_SIZE);
+
+ /* set the minimum ring size to 4, to allow the loopback test to work
+ * unchanged.
+ */
+ for (i = 2; i <= PCNET32_LOG_MAX_TX_BUFFERS; i++) {
+ if (lp->tx_ring_size <= (1 << i))
+ break;
+ }
+ lp->tx_ring_size = (1 << i);
+ lp->tx_mod_mask = lp->tx_ring_size - 1;
+ lp->tx_len_bits = (i << 12);
- spin_unlock_irqrestore(&lp->lock, flags);
+ for (i = 2; i <= PCNET32_LOG_MAX_RX_BUFFERS; i++) {
+ if (lp->rx_ring_size <= (1 << i))
+ break;
+ }
+ lp->rx_ring_size = (1 << i);
+ lp->rx_mod_mask = lp->rx_ring_size - 1;
+ lp->rx_len_bits = (i << 4);
+
+ if (pcnet32_alloc_ring(dev, dev->name)) {
+ pcnet32_free_ring(dev);
+ spin_unlock_irqrestore(&lp->lock, flags);
+ return -ENOMEM;
+ }
- if (pcnet32_debug & NETIF_MSG_DRV)
- printk(KERN_INFO PFX "%s: Ring Param Settings: RX: %d, TX: %d\n",
- dev->name, lp->rx_ring_size, lp->tx_ring_size);
+ spin_unlock_irqrestore(&lp->lock, flags);
- if (netif_running(dev))
- pcnet32_open(dev);
+ if (pcnet32_debug & NETIF_MSG_DRV)
+ printk(KERN_INFO PFX
+ "%s: Ring Param Settings: RX: %d, TX: %d\n", dev->name,
+ lp->rx_ring_size, lp->tx_ring_size);
- return 0;
+ if (netif_running(dev))
+ pcnet32_open(dev);
+
+ return 0;
}
-static void pcnet32_get_strings(struct net_device *dev, u32 stringset, u8 *data)
+static void pcnet32_get_strings(struct net_device *dev, u32 stringset,
+ u8 * data)
{
- memcpy(data, pcnet32_gstrings_test, sizeof(pcnet32_gstrings_test));
+ memcpy(data, pcnet32_gstrings_test, sizeof(pcnet32_gstrings_test));
}
static int pcnet32_self_test_count(struct net_device *dev)
{
- return PCNET32_TEST_LEN;
+ return PCNET32_TEST_LEN;
}
static void pcnet32_ethtool_test(struct net_device *dev,
- struct ethtool_test *test, u64 *data)
+ struct ethtool_test *test, u64 * data)
{
- struct pcnet32_private *lp = dev->priv;
- int rc;
-
- if (test->flags == ETH_TEST_FL_OFFLINE) {
- rc = pcnet32_loopback_test(dev, data);
- if (rc) {
- if (netif_msg_hw(lp))
- printk(KERN_DEBUG "%s: Loopback test failed.\n", dev->name);
- test->flags |= ETH_TEST_FL_FAILED;
+ struct pcnet32_private *lp = dev->priv;
+ int rc;
+
+ if (test->flags == ETH_TEST_FL_OFFLINE) {
+ rc = pcnet32_loopback_test(dev, data);
+ if (rc) {
+ if (netif_msg_hw(lp))
+ printk(KERN_DEBUG "%s: Loopback test failed.\n",
+ dev->name);
+ test->flags |= ETH_TEST_FL_FAILED;
+ } else if (netif_msg_hw(lp))
+ printk(KERN_DEBUG "%s: Loopback test passed.\n",
+ dev->name);
} else if (netif_msg_hw(lp))
- printk(KERN_DEBUG "%s: Loopback test passed.\n", dev->name);
- } else if (netif_msg_hw(lp))
- printk(KERN_DEBUG "%s: No tests to run (specify 'Offline' on ethtool).", dev->name);
-} /* end pcnet32_ethtool_test */
+ printk(KERN_DEBUG
+ "%s: No tests to run (specify 'Offline' on ethtool).",
+ dev->name);
+} /* end pcnet32_ethtool_test */
-static int pcnet32_loopback_test(struct net_device *dev, uint64_t *data1)
+static int pcnet32_loopback_test(struct net_device *dev, uint64_t * data1)
{
- struct pcnet32_private *lp = dev->priv;
- struct pcnet32_access *a = &lp->a; /* access to registers */
- ulong ioaddr = dev->base_addr; /* card base I/O address */
- struct sk_buff *skb; /* sk buff */
- int x, i; /* counters */
- int numbuffs = 4; /* number of TX/RX buffers and descs */
- u16 status = 0x8300; /* TX ring status */
- u16 teststatus; /* test of ring status */
- int rc; /* return code */
- int size; /* size of packets */
- unsigned char *packet; /* source packet data */
- static const int data_len = 60; /* length of source packets */
- unsigned long flags;
- unsigned long ticks;
-
- *data1 = 1; /* status of test, default to fail */
- rc = 1; /* default to fail */
-
- if (netif_running(dev))
- pcnet32_close(dev);
-
- spin_lock_irqsave(&lp->lock, flags);
-
- /* Reset the PCNET32 */
- lp->a.reset (ioaddr);
-
- /* switch pcnet32 to 32bit mode */
- lp->a.write_bcr (ioaddr, 20, 2);
-
- lp->init_block.mode = le16_to_cpu((lp->options & PCNET32_PORT_PORTSEL) << 7);
- lp->init_block.filter[0] = 0;
- lp->init_block.filter[1] = 0;
-
- /* purge & init rings but don't actually restart */
- pcnet32_restart(dev, 0x0000);
-
- lp->a.write_csr(ioaddr, 0, 0x0004); /* Set STOP bit */
-
- /* Initialize Transmit buffers. */
- size = data_len + 15;
- for (x=0; x<numbuffs; x++) {
- if (!(skb = dev_alloc_skb(size))) {
- if (netif_msg_hw(lp))
- printk(KERN_DEBUG "%s: Cannot allocate skb at line: %d!\n",
- dev->name, __LINE__);
- goto clean_up;
- } else {
- packet = skb->data;
- skb_put(skb, size); /* create space for data */
- lp->tx_skbuff[x] = skb;
- lp->tx_ring[x].length = le16_to_cpu(-skb->len);
- lp->tx_ring[x].misc = 0;
-
- /* put DA and SA into the skb */
- for (i=0; i<6; i++)
- *packet++ = dev->dev_addr[i];
- for (i=0; i<6; i++)
- *packet++ = dev->dev_addr[i];
- /* type */
- *packet++ = 0x08;
- *packet++ = 0x06;
- /* packet number */
- *packet++ = x;
- /* fill packet with data */
- for (i=0; i<data_len; i++)
- *packet++ = i;
-
- lp->tx_dma_addr[x] = pci_map_single(lp->pci_dev, skb->data,
- skb->len, PCI_DMA_TODEVICE);
- lp->tx_ring[x].base = (u32)le32_to_cpu(lp->tx_dma_addr[x]);
- wmb(); /* Make sure owner changes after all others are visible */
- lp->tx_ring[x].status = le16_to_cpu(status);
- }
- }
-
- x = a->read_bcr(ioaddr, 32); /* set internal loopback in BSR32 */
- x = x | 0x0002;
- a->write_bcr(ioaddr, 32, x);
-
- lp->a.write_csr (ioaddr, 15, 0x0044); /* set int loopback in CSR15 */
-
- teststatus = le16_to_cpu(0x8000);
- lp->a.write_csr(ioaddr, 0, 0x0002); /* Set STRT bit */
-
- /* Check status of descriptors */
- for (x=0; x<numbuffs; x++) {
- ticks = 0;
- rmb();
- while ((lp->rx_ring[x].status & teststatus) && (ticks < 200)) {
- spin_unlock_irqrestore(&lp->lock, flags);
- mdelay(1);
- spin_lock_irqsave(&lp->lock, flags);
- rmb();
- ticks++;
- }
- if (ticks == 200) {
- if (netif_msg_hw(lp))
- printk("%s: Desc %d failed to reset!\n",dev->name,x);
- break;
- }
- }
-
- lp->a.write_csr(ioaddr, 0, 0x0004); /* Set STOP bit */
- wmb();
- if (netif_msg_hw(lp) && netif_msg_pktdata(lp)) {
- printk(KERN_DEBUG "%s: RX loopback packets:\n", dev->name);
-
- for (x=0; x<numbuffs; x++) {
- printk(KERN_DEBUG "%s: Packet %d:\n", dev->name, x);
- skb = lp->rx_skbuff[x];
- for (i=0; i<size; i++) {
- printk("%02x ", *(skb->data+i));
- }
- printk("\n");
- }
- }
-
- x = 0;
- rc = 0;
- while (x<numbuffs && !rc) {
- skb = lp->rx_skbuff[x];
- packet = lp->tx_skbuff[x]->data;
- for (i=0; i<size; i++) {
- if (*(skb->data+i) != packet[i]) {
- if (netif_msg_hw(lp))
- printk(KERN_DEBUG "%s: Error in compare! %2x - %02x %02x\n",
- dev->name, i, *(skb->data+i), packet[i]);
- rc = 1;
- break;
- }
+ struct pcnet32_private *lp = dev->priv;
+ struct pcnet32_access *a = &lp->a; /* access to registers */
+ ulong ioaddr = dev->base_addr; /* card base I/O address */
+ struct sk_buff *skb; /* sk buff */
+ int x, i; /* counters */
+ int numbuffs = 4; /* number of TX/RX buffers and descs */
+ u16 status = 0x8300; /* TX ring status */
+ u16 teststatus; /* test of ring status */
+ int rc; /* return code */
+ int size; /* size of packets */
+ unsigned char *packet; /* source packet data */
+ static const int data_len = 60; /* length of source packets */
+ unsigned long flags;
+ unsigned long ticks;
+
+ *data1 = 1; /* status of test, default to fail */
+ rc = 1; /* default to fail */
+
+ if (netif_running(dev))
+ pcnet32_close(dev);
+
+ spin_lock_irqsave(&lp->lock, flags);
+
+ /* Reset the PCNET32 */
+ lp->a.reset(ioaddr);
+
+ /* switch pcnet32 to 32bit mode */
+ lp->a.write_bcr(ioaddr, 20, 2);
+
+ lp->init_block.mode =
+ le16_to_cpu((lp->options & PCNET32_PORT_PORTSEL) << 7);
+ lp->init_block.filter[0] = 0;
+ lp->init_block.filter[1] = 0;
+
+ /* purge & init rings but don't actually restart */
+ pcnet32_restart(dev, 0x0000);
+
+ lp->a.write_csr(ioaddr, 0, 0x0004); /* Set STOP bit */
+
+ /* Initialize Transmit buffers. */
+ size = data_len + 15;
+ for (x = 0; x < numbuffs; x++) {
+ if (!(skb = dev_alloc_skb(size))) {
+ if (netif_msg_hw(lp))
+ printk(KERN_DEBUG
+ "%s: Cannot allocate skb at line: %d!\n",
+ dev->name, __LINE__);
+ goto clean_up;
+ } else {
+ packet = skb->data;
+ skb_put(skb, size); /* create space for data */
+ lp->tx_skbuff[x] = skb;
+ lp->tx_ring[x].length = le16_to_cpu(-skb->len);
+ lp->tx_ring[x].misc = 0;
+
+ /* put DA and SA into the skb */
+ for (i = 0; i < 6; i++)
+ *packet++ = dev->dev_addr[i];
+ for (i = 0; i < 6; i++)
+ *packet++ = dev->dev_addr[i];
+ /* type */
+ *packet++ = 0x08;
+ *packet++ = 0x06;
+ /* packet number */
+ *packet++ = x;
+ /* fill packet with data */
+ for (i = 0; i < data_len; i++)
+ *packet++ = i;
+
+ lp->tx_dma_addr[x] =
+ pci_map_single(lp->pci_dev, skb->data, skb->len,
+ PCI_DMA_TODEVICE);
+ lp->tx_ring[x].base =
+ (u32) le32_to_cpu(lp->tx_dma_addr[x]);
+ wmb(); /* Make sure owner changes after all others are visible */
+ lp->tx_ring[x].status = le16_to_cpu(status);
+ }
+ }
+
+ x = a->read_bcr(ioaddr, 32); /* set internal loopback in BSR32 */
+ x = x | 0x0002;
+ a->write_bcr(ioaddr, 32, x);
+
+ lp->a.write_csr(ioaddr, 15, 0x0044); /* set int loopback in CSR15 */
+
+ teststatus = le16_to_cpu(0x8000);
+ lp->a.write_csr(ioaddr, 0, 0x0002); /* Set STRT bit */
+
+ /* Check status of descriptors */
+ for (x = 0; x < numbuffs; x++) {
+ ticks = 0;
+ rmb();
+ while ((lp->rx_ring[x].status & teststatus) && (ticks < 200)) {
+ spin_unlock_irqrestore(&lp->lock, flags);
+ mdelay(1);
+ spin_lock_irqsave(&lp->lock, flags);
+ rmb();
+ ticks++;
+ }
+ if (ticks == 200) {
+ if (netif_msg_hw(lp))
+ printk("%s: Desc %d failed to reset!\n",
+ dev->name, x);
+ break;
+ }
+ }
+
+ lp->a.write_csr(ioaddr, 0, 0x0004); /* Set STOP bit */
+ wmb();
+ if (netif_msg_hw(lp) && netif_msg_pktdata(lp)) {
+ printk(KERN_DEBUG "%s: RX loopback packets:\n", dev->name);
+
+ for (x = 0; x < numbuffs; x++) {
+ printk(KERN_DEBUG "%s: Packet %d:\n", dev->name, x);
+ skb = lp->rx_skbuff[x];
+ for (i = 0; i < size; i++) {
+ printk("%02x ", *(skb->data + i));
+ }
+ printk("\n");
+ }
+ }
+
+ x = 0;
+ rc = 0;
+ while (x < numbuffs && !rc) {
+ skb = lp->rx_skbuff[x];
+ packet = lp->tx_skbuff[x]->data;
+ for (i = 0; i < size; i++) {
+ if (*(skb->data + i) != packet[i]) {
+ if (netif_msg_hw(lp))
+ printk(KERN_DEBUG
+ "%s: Error in compare! %2x - %02x %02x\n",
+ dev->name, i, *(skb->data + i),
+ packet[i]);
+ rc = 1;
+ break;
+ }
+ }
+ x++;
+ }
+ if (!rc) {
+ *data1 = 0;
}
- x++;
- }
- if (!rc) {
- *data1 = 0;
- }
-clean_up:
- pcnet32_purge_tx_ring(dev);
- x = a->read_csr(ioaddr, 15) & 0xFFFF;
- a->write_csr(ioaddr, 15, (x & ~0x0044)); /* reset bits 6 and 2 */
+ clean_up:
+ pcnet32_purge_tx_ring(dev);
+ x = a->read_csr(ioaddr, 15) & 0xFFFF;
+ a->write_csr(ioaddr, 15, (x & ~0x0044)); /* reset bits 6 and 2 */
- x = a->read_bcr(ioaddr, 32); /* reset internal loopback */
- x = x & ~0x0002;
- a->write_bcr(ioaddr, 32, x);
+ x = a->read_bcr(ioaddr, 32); /* reset internal loopback */
+ x = x & ~0x0002;
+ a->write_bcr(ioaddr, 32, x);
- spin_unlock_irqrestore(&lp->lock, flags);
+ spin_unlock_irqrestore(&lp->lock, flags);
- if (netif_running(dev)) {
- pcnet32_open(dev);
- } else {
- lp->a.write_bcr (ioaddr, 20, 4); /* return to 16bit mode */
- }
+ if (netif_running(dev)) {
+ pcnet32_open(dev);
+ } else {
+ lp->a.write_bcr(ioaddr, 20, 4); /* return to 16bit mode */
+ }
- return(rc);
-} /* end pcnet32_loopback_test */
+ return (rc);
+} /* end pcnet32_loopback_test */
static void pcnet32_led_blink_callback(struct net_device *dev)
{
- struct pcnet32_private *lp = dev->priv;
- struct pcnet32_access *a = &lp->a;
- ulong ioaddr = dev->base_addr;
- unsigned long flags;
- int i;
-
- spin_lock_irqsave(&lp->lock, flags);
- for (i=4; i<8; i++) {
- a->write_bcr(ioaddr, i, a->read_bcr(ioaddr, i) ^ 0x4000);
- }
- spin_unlock_irqrestore(&lp->lock, flags);
-
- mod_timer(&lp->blink_timer, PCNET32_BLINK_TIMEOUT);
+ struct pcnet32_private *lp = dev->priv;
+ struct pcnet32_access *a = &lp->a;
+ ulong ioaddr = dev->base_addr;
+ unsigned long flags;
+ int i;
+
+ spin_lock_irqsave(&lp->lock, flags);
+ for (i = 4; i < 8; i++) {
+ a->write_bcr(ioaddr, i, a->read_bcr(ioaddr, i) ^ 0x4000);
+ }
+ spin_unlock_irqrestore(&lp->lock, flags);
+
+ mod_timer(&lp->blink_timer, PCNET32_BLINK_TIMEOUT);
}
static int pcnet32_phys_id(struct net_device *dev, u32 data)
{
- struct pcnet32_private *lp = dev->priv;
- struct pcnet32_access *a = &lp->a;
- ulong ioaddr = dev->base_addr;
- unsigned long flags;
- int i, regs[4];
-
- if (!lp->blink_timer.function) {
- init_timer(&lp->blink_timer);
- lp->blink_timer.function = (void *) pcnet32_led_blink_callback;
- lp->blink_timer.data = (unsigned long) dev;
- }
-
- /* Save the current value of the bcrs */
- spin_lock_irqsave(&lp->lock, flags);
- for (i=4; i<8; i++) {
- regs[i-4] = a->read_bcr(ioaddr, i);
- }
- spin_unlock_irqrestore(&lp->lock, flags);
-
- mod_timer(&lp->blink_timer, jiffies);
- set_current_state(TASK_INTERRUPTIBLE);
-
- if ((!data) || (data > (u32)(MAX_SCHEDULE_TIMEOUT / HZ)))
- data = (u32)(MAX_SCHEDULE_TIMEOUT / HZ);
-
- msleep_interruptible(data * 1000);
- del_timer_sync(&lp->blink_timer);
-
- /* Restore the original value of the bcrs */
- spin_lock_irqsave(&lp->lock, flags);
- for (i=4; i<8; i++) {
- a->write_bcr(ioaddr, i, regs[i-4]);
- }
- spin_unlock_irqrestore(&lp->lock, flags);
-
- return 0;
+ struct pcnet32_private *lp = dev->priv;
+ struct pcnet32_access *a = &lp->a;
+ ulong ioaddr = dev->base_addr;
+ unsigned long flags;
+ int i, regs[4];
+
+ if (!lp->blink_timer.function) {
+ init_timer(&lp->blink_timer);
+ lp->blink_timer.function = (void *)pcnet32_led_blink_callback;
+ lp->blink_timer.data = (unsigned long)dev;
+ }
+
+ /* Save the current value of the bcrs */
+ spin_lock_irqsave(&lp->lock, flags);
+ for (i = 4; i < 8; i++) {
+ regs[i - 4] = a->read_bcr(ioaddr, i);
+ }
+ spin_unlock_irqrestore(&lp->lock, flags);
+
+ mod_timer(&lp->blink_timer, jiffies);
+ set_current_state(TASK_INTERRUPTIBLE);
+
+ if ((!data) || (data > (u32) (MAX_SCHEDULE_TIMEOUT / HZ)))
+ data = (u32) (MAX_SCHEDULE_TIMEOUT / HZ);
+
+ msleep_interruptible(data * 1000);
+ del_timer_sync(&lp->blink_timer);
+
+ /* Restore the original value of the bcrs */
+ spin_lock_irqsave(&lp->lock, flags);
+ for (i = 4; i < 8; i++) {
+ a->write_bcr(ioaddr, i, regs[i - 4]);
+ }
+ spin_unlock_irqrestore(&lp->lock, flags);
+
+ return 0;
}
+#define PCNET32_REGS_PER_PHY 32
+#define PCNET32_MAX_PHYS 32
static int pcnet32_get_regs_len(struct net_device *dev)
{
- return(PCNET32_NUM_REGS * sizeof(u16));
+ struct pcnet32_private *lp = dev->priv;
+ int j = lp->phycount * PCNET32_REGS_PER_PHY;
+
+ return ((PCNET32_NUM_REGS + j) * sizeof(u16));
}
static void pcnet32_get_regs(struct net_device *dev, struct ethtool_regs *regs,
- void *ptr)
+ void *ptr)
{
- int i, csr0;
- u16 *buff = ptr;
- struct pcnet32_private *lp = dev->priv;
- struct pcnet32_access *a = &lp->a;
- ulong ioaddr = dev->base_addr;
- int ticks;
- unsigned long flags;
-
- spin_lock_irqsave(&lp->lock, flags);
-
- csr0 = a->read_csr(ioaddr, 0);
- if (!(csr0 & 0x0004)) { /* If not stopped */
- /* set SUSPEND (SPND) - CSR5 bit 0 */
- a->write_csr(ioaddr, 5, 0x0001);
-
- /* poll waiting for bit to be set */
- ticks = 0;
- while (!(a->read_csr(ioaddr, 5) & 0x0001)) {
- spin_unlock_irqrestore(&lp->lock, flags);
- mdelay(1);
- spin_lock_irqsave(&lp->lock, flags);
- ticks++;
- if (ticks > 200) {
- if (netif_msg_hw(lp))
- printk(KERN_DEBUG "%s: Error getting into suspend!\n",
- dev->name);
- break;
- }
- }
- }
+ int i, csr0;
+ u16 *buff = ptr;
+ struct pcnet32_private *lp = dev->priv;
+ struct pcnet32_access *a = &lp->a;
+ ulong ioaddr = dev->base_addr;
+ int ticks;
+ unsigned long flags;
- /* read address PROM */
- for (i=0; i<16; i += 2)
- *buff++ = inw(ioaddr + i);
+ spin_lock_irqsave(&lp->lock, flags);
- /* read control and status registers */
- for (i=0; i<90; i++) {
- *buff++ = a->read_csr(ioaddr, i);
- }
+ csr0 = a->read_csr(ioaddr, 0);
+ if (!(csr0 & 0x0004)) { /* If not stopped */
+ /* set SUSPEND (SPND) - CSR5 bit 0 */
+ a->write_csr(ioaddr, 5, 0x0001);
+
+ /* poll waiting for bit to be set */
+ ticks = 0;
+ while (!(a->read_csr(ioaddr, 5) & 0x0001)) {
+ spin_unlock_irqrestore(&lp->lock, flags);
+ mdelay(1);
+ spin_lock_irqsave(&lp->lock, flags);
+ ticks++;
+ if (ticks > 200) {
+ if (netif_msg_hw(lp))
+ printk(KERN_DEBUG
+ "%s: Error getting into suspend!\n",
+ dev->name);
+ break;
+ }
+ }
+ }
- *buff++ = a->read_csr(ioaddr, 112);
- *buff++ = a->read_csr(ioaddr, 114);
+ /* read address PROM */
+ for (i = 0; i < 16; i += 2)
+ *buff++ = inw(ioaddr + i);
- /* read bus configuration registers */
- for (i=0; i<30; i++) {
- *buff++ = a->read_bcr(ioaddr, i);
- }
- *buff++ = 0; /* skip bcr30 so as not to hang 79C976 */
- for (i=31; i<36; i++) {
- *buff++ = a->read_bcr(ioaddr, i);
- }
+ /* read control and status registers */
+ for (i = 0; i < 90; i++) {
+ *buff++ = a->read_csr(ioaddr, i);
+ }
+
+ *buff++ = a->read_csr(ioaddr, 112);
+ *buff++ = a->read_csr(ioaddr, 114);
- /* read mii phy registers */
- if (lp->mii) {
- for (i=0; i<32; i++) {
- lp->a.write_bcr(ioaddr, 33, ((lp->mii_if.phy_id) << 5) | i);
- *buff++ = lp->a.read_bcr(ioaddr, 34);
+ /* read bus configuration registers */
+ for (i = 0; i < 30; i++) {
+ *buff++ = a->read_bcr(ioaddr, i);
+ }
+ *buff++ = 0; /* skip bcr30 so as not to hang 79C976 */
+ for (i = 31; i < 36; i++) {
+ *buff++ = a->read_bcr(ioaddr, i);
}
- }
- if (!(csr0 & 0x0004)) { /* If not stopped */
- /* clear SUSPEND (SPND) - CSR5 bit 0 */
- a->write_csr(ioaddr, 5, 0x0000);
- }
+ /* read mii phy registers */
+ if (lp->mii) {
+ int j;
+ for (j = 0; j < PCNET32_MAX_PHYS; j++) {
+ if (lp->phymask & (1 << j)) {
+ for (i = 0; i < PCNET32_REGS_PER_PHY; i++) {
+ lp->a.write_bcr(ioaddr, 33,
+ (j << 5) | i);
+ *buff++ = lp->a.read_bcr(ioaddr, 34);
+ }
+ }
+ }
+ }
- i = buff - (u16 *)ptr;
- for (; i < PCNET32_NUM_REGS; i++)
- *buff++ = 0;
+ if (!(csr0 & 0x0004)) { /* If not stopped */
+ /* clear SUSPEND (SPND) - CSR5 bit 0 */
+ a->write_csr(ioaddr, 5, 0x0000);
+ }
- spin_unlock_irqrestore(&lp->lock, flags);
+ spin_unlock_irqrestore(&lp->lock, flags);
}
static struct ethtool_ops pcnet32_ethtool_ops = {
- .get_settings = pcnet32_get_settings,
- .set_settings = pcnet32_set_settings,
- .get_drvinfo = pcnet32_get_drvinfo,
- .get_msglevel = pcnet32_get_msglevel,
- .set_msglevel = pcnet32_set_msglevel,
- .nway_reset = pcnet32_nway_reset,
- .get_link = pcnet32_get_link,
- .get_ringparam = pcnet32_get_ringparam,
- .set_ringparam = pcnet32_set_ringparam,
- .get_tx_csum = ethtool_op_get_tx_csum,
- .get_sg = ethtool_op_get_sg,
- .get_tso = ethtool_op_get_tso,
- .get_strings = pcnet32_get_strings,
- .self_test_count = pcnet32_self_test_count,
- .self_test = pcnet32_ethtool_test,
- .phys_id = pcnet32_phys_id,
- .get_regs_len = pcnet32_get_regs_len,
- .get_regs = pcnet32_get_regs,
- .get_perm_addr = ethtool_op_get_perm_addr,
+ .get_settings = pcnet32_get_settings,
+ .set_settings = pcnet32_set_settings,
+ .get_drvinfo = pcnet32_get_drvinfo,
+ .get_msglevel = pcnet32_get_msglevel,
+ .set_msglevel = pcnet32_set_msglevel,
+ .nway_reset = pcnet32_nway_reset,
+ .get_link = pcnet32_get_link,
+ .get_ringparam = pcnet32_get_ringparam,
+ .set_ringparam = pcnet32_set_ringparam,
+ .get_tx_csum = ethtool_op_get_tx_csum,
+ .get_sg = ethtool_op_get_sg,
+ .get_tso = ethtool_op_get_tso,
+ .get_strings = pcnet32_get_strings,
+ .self_test_count = pcnet32_self_test_count,
+ .self_test = pcnet32_ethtool_test,
+ .phys_id = pcnet32_phys_id,
+ .get_regs_len = pcnet32_get_regs_len,
+ .get_regs = pcnet32_get_regs,
+ .get_perm_addr = ethtool_op_get_perm_addr,
};
/* only probes for non-PCI devices, the rest are handled by
* pci_register_driver via pcnet32_probe_pci */
-static void __devinit
-pcnet32_probe_vlbus(void)
+static void __devinit pcnet32_probe_vlbus(void)
{
- unsigned int *port, ioaddr;
-
- /* search for PCnet32 VLB cards at known addresses */
- for (port = pcnet32_portlist; (ioaddr = *port); port++) {
- if (request_region(ioaddr, PCNET32_TOTAL_SIZE, "pcnet32_probe_vlbus")) {
- /* check if there is really a pcnet chip on that ioaddr */
- if ((inb(ioaddr + 14) == 0x57) && (inb(ioaddr + 15) == 0x57)) {
- pcnet32_probe1(ioaddr, 0, NULL);
- } else {
- release_region(ioaddr, PCNET32_TOTAL_SIZE);
- }
- }
- }
+ unsigned int *port, ioaddr;
+
+ /* search for PCnet32 VLB cards at known addresses */
+ for (port = pcnet32_portlist; (ioaddr = *port); port++) {
+ if (request_region
+ (ioaddr, PCNET32_TOTAL_SIZE, "pcnet32_probe_vlbus")) {
+ /* check if there is really a pcnet chip on that ioaddr */
+ if ((inb(ioaddr + 14) == 0x57)
+ && (inb(ioaddr + 15) == 0x57)) {
+ pcnet32_probe1(ioaddr, 0, NULL);
+ } else {
+ release_region(ioaddr, PCNET32_TOTAL_SIZE);
+ }
+ }
+ }
}
-
static int __devinit
pcnet32_probe_pci(struct pci_dev *pdev, const struct pci_device_id *ent)
{
- unsigned long ioaddr;
- int err;
-
- err = pci_enable_device(pdev);
- if (err < 0) {
- if (pcnet32_debug & NETIF_MSG_PROBE)
- printk(KERN_ERR PFX "failed to enable device -- err=%d\n", err);
- return err;
- }
- pci_set_master(pdev);
+ unsigned long ioaddr;
+ int err;
+
+ err = pci_enable_device(pdev);
+ if (err < 0) {
+ if (pcnet32_debug & NETIF_MSG_PROBE)
+ printk(KERN_ERR PFX
+ "failed to enable device -- err=%d\n", err);
+ return err;
+ }
+ pci_set_master(pdev);
+
+ ioaddr = pci_resource_start(pdev, 0);
+ if (!ioaddr) {
+ if (pcnet32_debug & NETIF_MSG_PROBE)
+ printk(KERN_ERR PFX
+ "card has no PCI IO resources, aborting\n");
+ return -ENODEV;
+ }
- ioaddr = pci_resource_start (pdev, 0);
- if (!ioaddr) {
- if (pcnet32_debug & NETIF_MSG_PROBE)
- printk (KERN_ERR PFX "card has no PCI IO resources, aborting\n");
- return -ENODEV;
- }
+ if (!pci_dma_supported(pdev, PCNET32_DMA_MASK)) {
+ if (pcnet32_debug & NETIF_MSG_PROBE)
+ printk(KERN_ERR PFX
+ "architecture does not support 32bit PCI busmaster DMA\n");
+ return -ENODEV;
+ }
+ if (request_region(ioaddr, PCNET32_TOTAL_SIZE, "pcnet32_probe_pci") ==
+ NULL) {
+ if (pcnet32_debug & NETIF_MSG_PROBE)
+ printk(KERN_ERR PFX
+ "io address range already allocated\n");
+ return -EBUSY;
+ }
- if (!pci_dma_supported(pdev, PCNET32_DMA_MASK)) {
- if (pcnet32_debug & NETIF_MSG_PROBE)
- printk(KERN_ERR PFX "architecture does not support 32bit PCI busmaster DMA\n");
- return -ENODEV;
- }
- if (request_region(ioaddr, PCNET32_TOTAL_SIZE, "pcnet32_probe_pci") == NULL) {
- if (pcnet32_debug & NETIF_MSG_PROBE)
- printk(KERN_ERR PFX "io address range already allocated\n");
- return -EBUSY;
- }
-
- err = pcnet32_probe1(ioaddr, 1, pdev);
- if (err < 0) {
- pci_disable_device(pdev);
- }
- return err;
+ err = pcnet32_probe1(ioaddr, 1, pdev);
+ if (err < 0) {
+ pci_disable_device(pdev);
+ }
+ return err;
}
-
/* pcnet32_probe1
* Called from both pcnet32_probe_vlbus and pcnet_probe_pci.
* pdev will be NULL when called from pcnet32_probe_vlbus.
@@ -1107,630 +1028,764 @@ pcnet32_probe_pci(struct pci_dev *pdev, const struct pci_device_id *ent)
static int __devinit
pcnet32_probe1(unsigned long ioaddr, int shared, struct pci_dev *pdev)
{
- struct pcnet32_private *lp;
- dma_addr_t lp_dma_addr;
- int i, media;
- int fdx, mii, fset, dxsuflo;
- int chip_version;
- char *chipname;
- struct net_device *dev;
- struct pcnet32_access *a = NULL;
- u8 promaddr[6];
- int ret = -ENODEV;
-
- /* reset the chip */
- pcnet32_wio_reset(ioaddr);
-
- /* NOTE: 16-bit check is first, otherwise some older PCnet chips fail */
- if (pcnet32_wio_read_csr(ioaddr, 0) == 4 && pcnet32_wio_check(ioaddr)) {
- a = &pcnet32_wio;
- } else {
- pcnet32_dwio_reset(ioaddr);
- if (pcnet32_dwio_read_csr(ioaddr, 0) == 4 && pcnet32_dwio_check(ioaddr)) {
- a = &pcnet32_dwio;
- } else
- goto err_release_region;
- }
-
- chip_version = a->read_csr(ioaddr, 88) | (a->read_csr(ioaddr,89) << 16);
- if ((pcnet32_debug & NETIF_MSG_PROBE) && (pcnet32_debug & NETIF_MSG_HW))
- printk(KERN_INFO " PCnet chip version is %#x.\n", chip_version);
- if ((chip_version & 0xfff) != 0x003) {
- if (pcnet32_debug & NETIF_MSG_PROBE)
- printk(KERN_INFO PFX "Unsupported chip version.\n");
- goto err_release_region;
- }
-
- /* initialize variables */
- fdx = mii = fset = dxsuflo = 0;
- chip_version = (chip_version >> 12) & 0xffff;
-
- switch (chip_version) {
- case 0x2420:
- chipname = "PCnet/PCI 79C970"; /* PCI */
- break;
- case 0x2430:
- if (shared)
- chipname = "PCnet/PCI 79C970"; /* 970 gives the wrong chip id back */
- else
- chipname = "PCnet/32 79C965"; /* 486/VL bus */
- break;
- case 0x2621:
- chipname = "PCnet/PCI II 79C970A"; /* PCI */
- fdx = 1;
- break;
- case 0x2623:
- chipname = "PCnet/FAST 79C971"; /* PCI */
- fdx = 1; mii = 1; fset = 1;
- break;
- case 0x2624:
- chipname = "PCnet/FAST+ 79C972"; /* PCI */
- fdx = 1; mii = 1; fset = 1;
- break;
- case 0x2625:
- chipname = "PCnet/FAST III 79C973"; /* PCI */
- fdx = 1; mii = 1;
- break;
- case 0x2626:
- chipname = "PCnet/Home 79C978"; /* PCI */
- fdx = 1;
+ struct pcnet32_private *lp;
+ dma_addr_t lp_dma_addr;
+ int i, media;
+ int fdx, mii, fset, dxsuflo;
+ int chip_version;
+ char *chipname;
+ struct net_device *dev;
+ struct pcnet32_access *a = NULL;
+ u8 promaddr[6];
+ int ret = -ENODEV;
+
+ /* reset the chip */
+ pcnet32_wio_reset(ioaddr);
+
+ /* NOTE: 16-bit check is first, otherwise some older PCnet chips fail */
+ if (pcnet32_wio_read_csr(ioaddr, 0) == 4 && pcnet32_wio_check(ioaddr)) {
+ a = &pcnet32_wio;
+ } else {
+ pcnet32_dwio_reset(ioaddr);
+ if (pcnet32_dwio_read_csr(ioaddr, 0) == 4
+ && pcnet32_dwio_check(ioaddr)) {
+ a = &pcnet32_dwio;
+ } else
+ goto err_release_region;
+ }
+
+ chip_version =
+ a->read_csr(ioaddr, 88) | (a->read_csr(ioaddr, 89) << 16);
+ if ((pcnet32_debug & NETIF_MSG_PROBE) && (pcnet32_debug & NETIF_MSG_HW))
+ printk(KERN_INFO " PCnet chip version is %#x.\n",
+ chip_version);
+ if ((chip_version & 0xfff) != 0x003) {
+ if (pcnet32_debug & NETIF_MSG_PROBE)
+ printk(KERN_INFO PFX "Unsupported chip version.\n");
+ goto err_release_region;
+ }
+
+ /* initialize variables */
+ fdx = mii = fset = dxsuflo = 0;
+ chip_version = (chip_version >> 12) & 0xffff;
+
+ switch (chip_version) {
+ case 0x2420:
+ chipname = "PCnet/PCI 79C970"; /* PCI */
+ break;
+ case 0x2430:
+ if (shared)
+ chipname = "PCnet/PCI 79C970"; /* 970 gives the wrong chip id back */
+ else
+ chipname = "PCnet/32 79C965"; /* 486/VL bus */
+ break;
+ case 0x2621:
+ chipname = "PCnet/PCI II 79C970A"; /* PCI */
+ fdx = 1;
+ break;
+ case 0x2623:
+ chipname = "PCnet/FAST 79C971"; /* PCI */
+ fdx = 1;
+ mii = 1;
+ fset = 1;
+ break;
+ case 0x2624:
+ chipname = "PCnet/FAST+ 79C972"; /* PCI */
+ fdx = 1;
+ mii = 1;
+ fset = 1;
+ break;
+ case 0x2625:
+ chipname = "PCnet/FAST III 79C973"; /* PCI */
+ fdx = 1;
+ mii = 1;
+ break;
+ case 0x2626:
+ chipname = "PCnet/Home 79C978"; /* PCI */
+ fdx = 1;
+ /*
+ * This is based on specs published at www.amd.com. This section
+ * assumes that a card with a 79C978 wants to go into standard
+ * ethernet mode. The 79C978 can also go into 1Mb HomePNA mode,
+ * and the module option homepna=1 can select this instead.
+ */
+ media = a->read_bcr(ioaddr, 49);
+ media &= ~3; /* default to 10Mb ethernet */
+ if (cards_found < MAX_UNITS && homepna[cards_found])
+ media |= 1; /* switch to home wiring mode */
+ if (pcnet32_debug & NETIF_MSG_PROBE)
+ printk(KERN_DEBUG PFX "media set to %sMbit mode.\n",
+ (media & 1) ? "1" : "10");
+ a->write_bcr(ioaddr, 49, media);
+ break;
+ case 0x2627:
+ chipname = "PCnet/FAST III 79C975"; /* PCI */
+ fdx = 1;
+ mii = 1;
+ break;
+ case 0x2628:
+ chipname = "PCnet/PRO 79C976";
+ fdx = 1;
+ mii = 1;
+ break;
+ default:
+ if (pcnet32_debug & NETIF_MSG_PROBE)
+ printk(KERN_INFO PFX
+ "PCnet version %#x, no PCnet32 chip.\n",
+ chip_version);
+ goto err_release_region;
+ }
+
/*
- * This is based on specs published at www.amd.com. This section
- * assumes that a card with a 79C978 wants to go into standard
- * ethernet mode. The 79C978 can also go into 1Mb HomePNA mode,
- * and the module option homepna=1 can select this instead.
+ * On selected chips turn on the BCR18:NOUFLO bit. This stops transmit
+ * starting until the packet is loaded. Strike one for reliability, lose
+	 * one for latency - although on PCI this isn't a big loss. Older chips
+	 * have FIFOs smaller than a packet, so you can't do this.
+ * Turn on BCR18:BurstRdEn and BCR18:BurstWrEn.
*/
- media = a->read_bcr(ioaddr, 49);
- media &= ~3; /* default to 10Mb ethernet */
- if (cards_found < MAX_UNITS && homepna[cards_found])
- media |= 1; /* switch to home wiring mode */
- if (pcnet32_debug & NETIF_MSG_PROBE)
- printk(KERN_DEBUG PFX "media set to %sMbit mode.\n",
- (media & 1) ? "1" : "10");
- a->write_bcr(ioaddr, 49, media);
- break;
- case 0x2627:
- chipname = "PCnet/FAST III 79C975"; /* PCI */
- fdx = 1; mii = 1;
- break;
- case 0x2628:
- chipname = "PCnet/PRO 79C976";
- fdx = 1; mii = 1;
- break;
- default:
- if (pcnet32_debug & NETIF_MSG_PROBE)
- printk(KERN_INFO PFX "PCnet version %#x, no PCnet32 chip.\n",
- chip_version);
- goto err_release_region;
- }
-
- /*
- * On selected chips turn on the BCR18:NOUFLO bit. This stops transmit
- * starting until the packet is loaded. Strike one for reliability, lose
- * one for latency - although on PCI this isnt a big loss. Older chips
- * have FIFO's smaller than a packet, so you can't do this.
- * Turn on BCR18:BurstRdEn and BCR18:BurstWrEn.
- */
-
- if (fset) {
- a->write_bcr(ioaddr, 18, (a->read_bcr(ioaddr, 18) | 0x0860));
- a->write_csr(ioaddr, 80, (a->read_csr(ioaddr, 80) & 0x0C00) | 0x0c00);
- dxsuflo = 1;
- }
-
- dev = alloc_etherdev(0);
- if (!dev) {
+
+ if (fset) {
+ a->write_bcr(ioaddr, 18, (a->read_bcr(ioaddr, 18) | 0x0860));
+ a->write_csr(ioaddr, 80,
+ (a->read_csr(ioaddr, 80) & 0x0C00) | 0x0c00);
+ dxsuflo = 1;
+ }
+
+ dev = alloc_etherdev(0);
+ if (!dev) {
+ if (pcnet32_debug & NETIF_MSG_PROBE)
+ printk(KERN_ERR PFX "Memory allocation failed.\n");
+ ret = -ENOMEM;
+ goto err_release_region;
+ }
+ SET_NETDEV_DEV(dev, &pdev->dev);
+
if (pcnet32_debug & NETIF_MSG_PROBE)
- printk(KERN_ERR PFX "Memory allocation failed.\n");
- ret = -ENOMEM;
- goto err_release_region;
- }
- SET_NETDEV_DEV(dev, &pdev->dev);
-
- if (pcnet32_debug & NETIF_MSG_PROBE)
- printk(KERN_INFO PFX "%s at %#3lx,", chipname, ioaddr);
-
- /* In most chips, after a chip reset, the ethernet address is read from the
- * station address PROM at the base address and programmed into the
- * "Physical Address Registers" CSR12-14.
- * As a precautionary measure, we read the PROM values and complain if
- * they disagree with the CSRs. Either way, we use the CSR values, and
- * double check that they are valid.
- */
- for (i = 0; i < 3; i++) {
- unsigned int val;
- val = a->read_csr(ioaddr, i+12) & 0x0ffff;
- /* There may be endianness issues here. */
- dev->dev_addr[2*i] = val & 0x0ff;
- dev->dev_addr[2*i+1] = (val >> 8) & 0x0ff;
- }
-
- /* read PROM address and compare with CSR address */
- for (i = 0; i < 6; i++)
- promaddr[i] = inb(ioaddr + i);
-
- if (memcmp(promaddr, dev->dev_addr, 6)
- || !is_valid_ether_addr(dev->dev_addr)) {
- if (is_valid_ether_addr(promaddr)) {
- if (pcnet32_debug & NETIF_MSG_PROBE) {
- printk(" warning: CSR address invalid,\n");
- printk(KERN_INFO " using instead PROM address of");
- }
- memcpy(dev->dev_addr, promaddr, 6);
- }
- }
- memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
-
- /* if the ethernet address is not valid, force to 00:00:00:00:00:00 */
- if (!is_valid_ether_addr(dev->perm_addr))
- memset(dev->dev_addr, 0, sizeof(dev->dev_addr));
-
- if (pcnet32_debug & NETIF_MSG_PROBE) {
+ printk(KERN_INFO PFX "%s at %#3lx,", chipname, ioaddr);
+
+ /* In most chips, after a chip reset, the ethernet address is read from the
+ * station address PROM at the base address and programmed into the
+ * "Physical Address Registers" CSR12-14.
+ * As a precautionary measure, we read the PROM values and complain if
+ * they disagree with the CSRs. Either way, we use the CSR values, and
+ * double check that they are valid.
+ */
+ for (i = 0; i < 3; i++) {
+ unsigned int val;
+ val = a->read_csr(ioaddr, i + 12) & 0x0ffff;
+ /* There may be endianness issues here. */
+ dev->dev_addr[2 * i] = val & 0x0ff;
+ dev->dev_addr[2 * i + 1] = (val >> 8) & 0x0ff;
+ }
+
+ /* read PROM address and compare with CSR address */
for (i = 0; i < 6; i++)
- printk(" %2.2x", dev->dev_addr[i]);
-
- /* Version 0x2623 and 0x2624 */
- if (((chip_version + 1) & 0xfffe) == 0x2624) {
- i = a->read_csr(ioaddr, 80) & 0x0C00; /* Check tx_start_pt */
- printk("\n" KERN_INFO " tx_start_pt(0x%04x):",i);
- switch(i>>10) {
- case 0: printk(" 20 bytes,"); break;
- case 1: printk(" 64 bytes,"); break;
- case 2: printk(" 128 bytes,"); break;
- case 3: printk("~220 bytes,"); break;
- }
- i = a->read_bcr(ioaddr, 18); /* Check Burst/Bus control */
- printk(" BCR18(%x):",i&0xffff);
- if (i & (1<<5)) printk("BurstWrEn ");
- if (i & (1<<6)) printk("BurstRdEn ");
- if (i & (1<<7)) printk("DWordIO ");
- if (i & (1<<11)) printk("NoUFlow ");
- i = a->read_bcr(ioaddr, 25);
- printk("\n" KERN_INFO " SRAMSIZE=0x%04x,",i<<8);
- i = a->read_bcr(ioaddr, 26);
- printk(" SRAM_BND=0x%04x,",i<<8);
- i = a->read_bcr(ioaddr, 27);
- if (i & (1<<14)) printk("LowLatRx");
- }
- }
-
- dev->base_addr = ioaddr;
- /* pci_alloc_consistent returns page-aligned memory, so we do not have to check the alignment */
- if ((lp = pci_alloc_consistent(pdev, sizeof(*lp), &lp_dma_addr)) == NULL) {
- if (pcnet32_debug & NETIF_MSG_PROBE)
- printk(KERN_ERR PFX "Consistent memory allocation failed.\n");
- ret = -ENOMEM;
- goto err_free_netdev;
- }
-
- memset(lp, 0, sizeof(*lp));
- lp->dma_addr = lp_dma_addr;
- lp->pci_dev = pdev;
-
- spin_lock_init(&lp->lock);
-
- SET_MODULE_OWNER(dev);
- SET_NETDEV_DEV(dev, &pdev->dev);
- dev->priv = lp;
- lp->name = chipname;
- lp->shared_irq = shared;
- lp->tx_ring_size = TX_RING_SIZE; /* default tx ring size */
- lp->rx_ring_size = RX_RING_SIZE; /* default rx ring size */
- lp->tx_mod_mask = lp->tx_ring_size - 1;
- lp->rx_mod_mask = lp->rx_ring_size - 1;
- lp->tx_len_bits = (PCNET32_LOG_TX_BUFFERS << 12);
- lp->rx_len_bits = (PCNET32_LOG_RX_BUFFERS << 4);
- lp->mii_if.full_duplex = fdx;
- lp->mii_if.phy_id_mask = 0x1f;
- lp->mii_if.reg_num_mask = 0x1f;
- lp->dxsuflo = dxsuflo;
- lp->mii = mii;
- lp->msg_enable = pcnet32_debug;
- if ((cards_found >= MAX_UNITS) || (options[cards_found] > sizeof(options_mapping)))
- lp->options = PCNET32_PORT_ASEL;
- else
- lp->options = options_mapping[options[cards_found]];
- lp->mii_if.dev = dev;
- lp->mii_if.mdio_read = mdio_read;
- lp->mii_if.mdio_write = mdio_write;
-
- if (fdx && !(lp->options & PCNET32_PORT_ASEL) &&
- ((cards_found>=MAX_UNITS) || full_duplex[cards_found]))
- lp->options |= PCNET32_PORT_FD;
-
- if (!a) {
- if (pcnet32_debug & NETIF_MSG_PROBE)
- printk(KERN_ERR PFX "No access methods\n");
- ret = -ENODEV;
- goto err_free_consistent;
- }
- lp->a = *a;
-
- /* prior to register_netdev, dev->name is not yet correct */
- if (pcnet32_alloc_ring(dev, pci_name(lp->pci_dev))) {
- ret = -ENOMEM;
- goto err_free_ring;
- }
- /* detect special T1/E1 WAN card by checking for MAC address */
- if (dev->dev_addr[0] == 0x00 && dev->dev_addr[1] == 0xe0
+ promaddr[i] = inb(ioaddr + i);
+
+ if (memcmp(promaddr, dev->dev_addr, 6)
+ || !is_valid_ether_addr(dev->dev_addr)) {
+ if (is_valid_ether_addr(promaddr)) {
+ if (pcnet32_debug & NETIF_MSG_PROBE) {
+ printk(" warning: CSR address invalid,\n");
+ printk(KERN_INFO
+ " using instead PROM address of");
+ }
+ memcpy(dev->dev_addr, promaddr, 6);
+ }
+ }
+ memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
+
+ /* if the ethernet address is not valid, force to 00:00:00:00:00:00 */
+ if (!is_valid_ether_addr(dev->perm_addr))
+ memset(dev->dev_addr, 0, sizeof(dev->dev_addr));
+
+ if (pcnet32_debug & NETIF_MSG_PROBE) {
+ for (i = 0; i < 6; i++)
+ printk(" %2.2x", dev->dev_addr[i]);
+
+ /* Version 0x2623 and 0x2624 */
+ if (((chip_version + 1) & 0xfffe) == 0x2624) {
+ i = a->read_csr(ioaddr, 80) & 0x0C00; /* Check tx_start_pt */
+ printk("\n" KERN_INFO " tx_start_pt(0x%04x):", i);
+ switch (i >> 10) {
+ case 0:
+ printk(" 20 bytes,");
+ break;
+ case 1:
+ printk(" 64 bytes,");
+ break;
+ case 2:
+ printk(" 128 bytes,");
+ break;
+ case 3:
+ printk("~220 bytes,");
+ break;
+ }
+ i = a->read_bcr(ioaddr, 18); /* Check Burst/Bus control */
+ printk(" BCR18(%x):", i & 0xffff);
+ if (i & (1 << 5))
+ printk("BurstWrEn ");
+ if (i & (1 << 6))
+ printk("BurstRdEn ");
+ if (i & (1 << 7))
+ printk("DWordIO ");
+ if (i & (1 << 11))
+ printk("NoUFlow ");
+ i = a->read_bcr(ioaddr, 25);
+ printk("\n" KERN_INFO " SRAMSIZE=0x%04x,", i << 8);
+ i = a->read_bcr(ioaddr, 26);
+ printk(" SRAM_BND=0x%04x,", i << 8);
+ i = a->read_bcr(ioaddr, 27);
+ if (i & (1 << 14))
+ printk("LowLatRx");
+ }
+ }
+
+ dev->base_addr = ioaddr;
+ /* pci_alloc_consistent returns page-aligned memory, so we do not have to check the alignment */
+	if ((lp = pci_alloc_consistent(pdev, sizeof(*lp),
+				       &lp_dma_addr)) == NULL) {
+ if (pcnet32_debug & NETIF_MSG_PROBE)
+ printk(KERN_ERR PFX
+ "Consistent memory allocation failed.\n");
+ ret = -ENOMEM;
+ goto err_free_netdev;
+ }
+
+ memset(lp, 0, sizeof(*lp));
+ lp->dma_addr = lp_dma_addr;
+ lp->pci_dev = pdev;
+
+ spin_lock_init(&lp->lock);
+
+ SET_MODULE_OWNER(dev);
+ SET_NETDEV_DEV(dev, &pdev->dev);
+ dev->priv = lp;
+ lp->name = chipname;
+ lp->shared_irq = shared;
+ lp->tx_ring_size = TX_RING_SIZE; /* default tx ring size */
+ lp->rx_ring_size = RX_RING_SIZE; /* default rx ring size */
+ lp->tx_mod_mask = lp->tx_ring_size - 1;
+ lp->rx_mod_mask = lp->rx_ring_size - 1;
+ lp->tx_len_bits = (PCNET32_LOG_TX_BUFFERS << 12);
+ lp->rx_len_bits = (PCNET32_LOG_RX_BUFFERS << 4);
+ lp->mii_if.full_duplex = fdx;
+ lp->mii_if.phy_id_mask = 0x1f;
+ lp->mii_if.reg_num_mask = 0x1f;
+ lp->dxsuflo = dxsuflo;
+ lp->mii = mii;
+ lp->msg_enable = pcnet32_debug;
+ if ((cards_found >= MAX_UNITS)
+ || (options[cards_found] > sizeof(options_mapping)))
+ lp->options = PCNET32_PORT_ASEL;
+ else
+ lp->options = options_mapping[options[cards_found]];
+ lp->mii_if.dev = dev;
+ lp->mii_if.mdio_read = mdio_read;
+ lp->mii_if.mdio_write = mdio_write;
+
+ if (fdx && !(lp->options & PCNET32_PORT_ASEL) &&
+ ((cards_found >= MAX_UNITS) || full_duplex[cards_found]))
+ lp->options |= PCNET32_PORT_FD;
+
+ if (!a) {
+ if (pcnet32_debug & NETIF_MSG_PROBE)
+ printk(KERN_ERR PFX "No access methods\n");
+ ret = -ENODEV;
+ goto err_free_consistent;
+ }
+ lp->a = *a;
+
+ /* prior to register_netdev, dev->name is not yet correct */
+ if (pcnet32_alloc_ring(dev, pci_name(lp->pci_dev))) {
+ ret = -ENOMEM;
+ goto err_free_ring;
+ }
+ /* detect special T1/E1 WAN card by checking for MAC address */
+ if (dev->dev_addr[0] == 0x00 && dev->dev_addr[1] == 0xe0
&& dev->dev_addr[2] == 0x75)
- lp->options = PCNET32_PORT_FD | PCNET32_PORT_GPSI;
-
- lp->init_block.mode = le16_to_cpu(0x0003); /* Disable Rx and Tx. */
- lp->init_block.tlen_rlen = le16_to_cpu(lp->tx_len_bits | lp->rx_len_bits);
- for (i = 0; i < 6; i++)
- lp->init_block.phys_addr[i] = dev->dev_addr[i];
- lp->init_block.filter[0] = 0x00000000;
- lp->init_block.filter[1] = 0x00000000;
- lp->init_block.rx_ring = (u32)le32_to_cpu(lp->rx_ring_dma_addr);
- lp->init_block.tx_ring = (u32)le32_to_cpu(lp->tx_ring_dma_addr);
-
- /* switch pcnet32 to 32bit mode */
- a->write_bcr(ioaddr, 20, 2);
-
- a->write_csr(ioaddr, 1, (lp->dma_addr + offsetof(struct pcnet32_private,
- init_block)) & 0xffff);
- a->write_csr(ioaddr, 2, (lp->dma_addr + offsetof(struct pcnet32_private,
- init_block)) >> 16);
-
- if (pdev) { /* use the IRQ provided by PCI */
- dev->irq = pdev->irq;
- if (pcnet32_debug & NETIF_MSG_PROBE)
- printk(" assigned IRQ %d.\n", dev->irq);
- } else {
- unsigned long irq_mask = probe_irq_on();
+ lp->options = PCNET32_PORT_FD | PCNET32_PORT_GPSI;
- /*
- * To auto-IRQ we enable the initialization-done and DMA error
- * interrupts. For ISA boards we get a DMA error, but VLB and PCI
- * boards will work.
- */
- /* Trigger an initialization just for the interrupt. */
- a->write_csr (ioaddr, 0, 0x41);
- mdelay (1);
+ lp->init_block.mode = le16_to_cpu(0x0003); /* Disable Rx and Tx. */
+ lp->init_block.tlen_rlen =
+ le16_to_cpu(lp->tx_len_bits | lp->rx_len_bits);
+ for (i = 0; i < 6; i++)
+ lp->init_block.phys_addr[i] = dev->dev_addr[i];
+ lp->init_block.filter[0] = 0x00000000;
+ lp->init_block.filter[1] = 0x00000000;
+ lp->init_block.rx_ring = (u32) le32_to_cpu(lp->rx_ring_dma_addr);
+ lp->init_block.tx_ring = (u32) le32_to_cpu(lp->tx_ring_dma_addr);
+
+ /* switch pcnet32 to 32bit mode */
+ a->write_bcr(ioaddr, 20, 2);
+
+ a->write_csr(ioaddr, 1, (lp->dma_addr + offsetof(struct pcnet32_private,
+ init_block)) & 0xffff);
+ a->write_csr(ioaddr, 2, (lp->dma_addr + offsetof(struct pcnet32_private,
+ init_block)) >> 16);
+
+ if (pdev) { /* use the IRQ provided by PCI */
+ dev->irq = pdev->irq;
+ if (pcnet32_debug & NETIF_MSG_PROBE)
+ printk(" assigned IRQ %d.\n", dev->irq);
+ } else {
+ unsigned long irq_mask = probe_irq_on();
+
+ /*
+ * To auto-IRQ we enable the initialization-done and DMA error
+ * interrupts. For ISA boards we get a DMA error, but VLB and PCI
+ * boards will work.
+ */
+ /* Trigger an initialization just for the interrupt. */
+ a->write_csr(ioaddr, 0, 0x41);
+ mdelay(1);
+
+ dev->irq = probe_irq_off(irq_mask);
+ if (!dev->irq) {
+ if (pcnet32_debug & NETIF_MSG_PROBE)
+ printk(", failed to detect IRQ line.\n");
+ ret = -ENODEV;
+ goto err_free_ring;
+ }
+ if (pcnet32_debug & NETIF_MSG_PROBE)
+ printk(", probed IRQ %d.\n", dev->irq);
+ }
- dev->irq = probe_irq_off (irq_mask);
- if (!dev->irq) {
- if (pcnet32_debug & NETIF_MSG_PROBE)
- printk(", failed to detect IRQ line.\n");
- ret = -ENODEV;
- goto err_free_ring;
+ /* Set the mii phy_id so that we can query the link state */
+ if (lp->mii) {
+ /* lp->phycount and lp->phymask are set to 0 by memset above */
+
+ lp->mii_if.phy_id = ((lp->a.read_bcr(ioaddr, 33)) >> 5) & 0x1f;
+ /* scan for PHYs */
+ for (i = 0; i < PCNET32_MAX_PHYS; i++) {
+ unsigned short id1, id2;
+
+ id1 = mdio_read(dev, i, MII_PHYSID1);
+ if (id1 == 0xffff)
+ continue;
+ id2 = mdio_read(dev, i, MII_PHYSID2);
+ if (id2 == 0xffff)
+ continue;
+ if (i == 31 && ((chip_version + 1) & 0xfffe) == 0x2624)
+ continue; /* 79C971 & 79C972 have phantom phy at id 31 */
+ lp->phycount++;
+ lp->phymask |= (1 << i);
+ lp->mii_if.phy_id = i;
+ if (pcnet32_debug & NETIF_MSG_PROBE)
+ printk(KERN_INFO PFX
+ "Found PHY %04x:%04x at address %d.\n",
+ id1, id2, i);
+ }
+ lp->a.write_bcr(ioaddr, 33, (lp->mii_if.phy_id) << 5);
+ if (lp->phycount > 1) {
+ lp->options |= PCNET32_PORT_MII;
+ }
}
- if (pcnet32_debug & NETIF_MSG_PROBE)
- printk(", probed IRQ %d.\n", dev->irq);
- }
-
- /* Set the mii phy_id so that we can query the link state */
- if (lp->mii)
- lp->mii_if.phy_id = ((lp->a.read_bcr (ioaddr, 33)) >> 5) & 0x1f;
-
- init_timer (&lp->watchdog_timer);
- lp->watchdog_timer.data = (unsigned long) dev;
- lp->watchdog_timer.function = (void *) &pcnet32_watchdog;
-
- /* The PCNET32-specific entries in the device structure. */
- dev->open = &pcnet32_open;
- dev->hard_start_xmit = &pcnet32_start_xmit;
- dev->stop = &pcnet32_close;
- dev->get_stats = &pcnet32_get_stats;
- dev->set_multicast_list = &pcnet32_set_multicast_list;
- dev->do_ioctl = &pcnet32_ioctl;
- dev->ethtool_ops = &pcnet32_ethtool_ops;
- dev->tx_timeout = pcnet32_tx_timeout;
- dev->watchdog_timeo = (5*HZ);
+
+ init_timer(&lp->watchdog_timer);
+ lp->watchdog_timer.data = (unsigned long)dev;
+ lp->watchdog_timer.function = (void *)&pcnet32_watchdog;
+
+ /* The PCNET32-specific entries in the device structure. */
+ dev->open = &pcnet32_open;
+ dev->hard_start_xmit = &pcnet32_start_xmit;
+ dev->stop = &pcnet32_close;
+ dev->get_stats = &pcnet32_get_stats;
+ dev->set_multicast_list = &pcnet32_set_multicast_list;
+ dev->do_ioctl = &pcnet32_ioctl;
+ dev->ethtool_ops = &pcnet32_ethtool_ops;
+ dev->tx_timeout = pcnet32_tx_timeout;
+ dev->watchdog_timeo = (5 * HZ);
#ifdef CONFIG_NET_POLL_CONTROLLER
- dev->poll_controller = pcnet32_poll_controller;
+ dev->poll_controller = pcnet32_poll_controller;
#endif
- /* Fill in the generic fields of the device structure. */
- if (register_netdev(dev))
- goto err_free_ring;
-
- if (pdev) {
- pci_set_drvdata(pdev, dev);
- } else {
- lp->next = pcnet32_dev;
- pcnet32_dev = dev;
- }
-
- if (pcnet32_debug & NETIF_MSG_PROBE)
- printk(KERN_INFO "%s: registered as %s\n", dev->name, lp->name);
- cards_found++;
-
- /* enable LED writes */
- a->write_bcr(ioaddr, 2, a->read_bcr(ioaddr, 2) | 0x1000);
-
- return 0;
-
-err_free_ring:
- pcnet32_free_ring(dev);
-err_free_consistent:
- pci_free_consistent(lp->pci_dev, sizeof(*lp), lp, lp->dma_addr);
-err_free_netdev:
- free_netdev(dev);
-err_release_region:
- release_region(ioaddr, PCNET32_TOTAL_SIZE);
- return ret;
-}
+ /* Fill in the generic fields of the device structure. */
+ if (register_netdev(dev))
+ goto err_free_ring;
+
+ if (pdev) {
+ pci_set_drvdata(pdev, dev);
+ } else {
+ lp->next = pcnet32_dev;
+ pcnet32_dev = dev;
+ }
+ if (pcnet32_debug & NETIF_MSG_PROBE)
+ printk(KERN_INFO "%s: registered as %s\n", dev->name, lp->name);
+ cards_found++;
+
+ /* enable LED writes */
+ a->write_bcr(ioaddr, 2, a->read_bcr(ioaddr, 2) | 0x1000);
+
+ return 0;
+
+ err_free_ring:
+ pcnet32_free_ring(dev);
+ err_free_consistent:
+ pci_free_consistent(lp->pci_dev, sizeof(*lp), lp, lp->dma_addr);
+ err_free_netdev:
+ free_netdev(dev);
+ err_release_region:
+ release_region(ioaddr, PCNET32_TOTAL_SIZE);
+ return ret;
+}
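
The probe decodes the part number by concatenating CSR88/CSR89 into one 32-bit ID, insisting on 0x003 in the low 12 bits, and then switching on bits 12-27. A small stand-alone sketch of that decode; the CSR values below are invented and happen to describe a 79C970A:

#include <stdio.h>
#include <stdint.h>

/* invented register contents, for illustration only */
static const uint16_t csr88 = 0x1003;
static const uint16_t csr89 = 0x0262;

int main(void)
{
        uint32_t id = csr88 | ((uint32_t)csr89 << 16);

        if ((id & 0xfff) != 0x003) {
                printf("not a PCnet32 part (id %#x)\n", id);
                return 1;
        }
        /* bits 12..27 carry the part number the driver switches on */
        printf("PCnet part number: %#x\n", (id >> 12) & 0xffff);
        return 0;
}

Run as-is it prints 0x2621, the case the switch above labels "PCnet/PCI II 79C970A".
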
/* if any allocation fails, caller must also call pcnet32_free_ring */
static int pcnet32_alloc_ring(struct net_device *dev, char *name)
{
- struct pcnet32_private *lp = dev->priv;
+ struct pcnet32_private *lp = dev->priv;
- lp->tx_ring = pci_alloc_consistent(lp->pci_dev,
- sizeof(struct pcnet32_tx_head) * lp->tx_ring_size,
- &lp->tx_ring_dma_addr);
- if (lp->tx_ring == NULL) {
- if (pcnet32_debug & NETIF_MSG_DRV)
- printk("\n" KERN_ERR PFX "%s: Consistent memory allocation failed.\n",
- name);
- return -ENOMEM;
- }
-
- lp->rx_ring = pci_alloc_consistent(lp->pci_dev,
- sizeof(struct pcnet32_rx_head) * lp->rx_ring_size,
- &lp->rx_ring_dma_addr);
- if (lp->rx_ring == NULL) {
- if (pcnet32_debug & NETIF_MSG_DRV)
- printk("\n" KERN_ERR PFX "%s: Consistent memory allocation failed.\n",
- name);
- return -ENOMEM;
- }
-
- lp->tx_dma_addr = kmalloc(sizeof(dma_addr_t) * lp->tx_ring_size,
- GFP_ATOMIC);
- if (!lp->tx_dma_addr) {
- if (pcnet32_debug & NETIF_MSG_DRV)
- printk("\n" KERN_ERR PFX "%s: Memory allocation failed.\n", name);
- return -ENOMEM;
- }
- memset(lp->tx_dma_addr, 0, sizeof(dma_addr_t) * lp->tx_ring_size);
-
- lp->rx_dma_addr = kmalloc(sizeof(dma_addr_t) * lp->rx_ring_size,
- GFP_ATOMIC);
- if (!lp->rx_dma_addr) {
- if (pcnet32_debug & NETIF_MSG_DRV)
- printk("\n" KERN_ERR PFX "%s: Memory allocation failed.\n", name);
- return -ENOMEM;
- }
- memset(lp->rx_dma_addr, 0, sizeof(dma_addr_t) * lp->rx_ring_size);
-
- lp->tx_skbuff = kmalloc(sizeof(struct sk_buff *) * lp->tx_ring_size,
- GFP_ATOMIC);
- if (!lp->tx_skbuff) {
- if (pcnet32_debug & NETIF_MSG_DRV)
- printk("\n" KERN_ERR PFX "%s: Memory allocation failed.\n", name);
- return -ENOMEM;
- }
- memset(lp->tx_skbuff, 0, sizeof(struct sk_buff *) * lp->tx_ring_size);
-
- lp->rx_skbuff = kmalloc(sizeof(struct sk_buff *) * lp->rx_ring_size,
- GFP_ATOMIC);
- if (!lp->rx_skbuff) {
- if (pcnet32_debug & NETIF_MSG_DRV)
- printk("\n" KERN_ERR PFX "%s: Memory allocation failed.\n", name);
- return -ENOMEM;
- }
- memset(lp->rx_skbuff, 0, sizeof(struct sk_buff *) * lp->rx_ring_size);
+ lp->tx_ring = pci_alloc_consistent(lp->pci_dev,
+ sizeof(struct pcnet32_tx_head) *
+ lp->tx_ring_size,
+ &lp->tx_ring_dma_addr);
+ if (lp->tx_ring == NULL) {
+ if (pcnet32_debug & NETIF_MSG_DRV)
+ printk("\n" KERN_ERR PFX
+ "%s: Consistent memory allocation failed.\n",
+ name);
+ return -ENOMEM;
+ }
- return 0;
-}
+ lp->rx_ring = pci_alloc_consistent(lp->pci_dev,
+ sizeof(struct pcnet32_rx_head) *
+ lp->rx_ring_size,
+ &lp->rx_ring_dma_addr);
+ if (lp->rx_ring == NULL) {
+ if (pcnet32_debug & NETIF_MSG_DRV)
+ printk("\n" KERN_ERR PFX
+ "%s: Consistent memory allocation failed.\n",
+ name);
+ return -ENOMEM;
+ }
+ lp->tx_dma_addr = kmalloc(sizeof(dma_addr_t) * lp->tx_ring_size,
+ GFP_ATOMIC);
+ if (!lp->tx_dma_addr) {
+ if (pcnet32_debug & NETIF_MSG_DRV)
+ printk("\n" KERN_ERR PFX
+ "%s: Memory allocation failed.\n", name);
+ return -ENOMEM;
+ }
+ memset(lp->tx_dma_addr, 0, sizeof(dma_addr_t) * lp->tx_ring_size);
+
+ lp->rx_dma_addr = kmalloc(sizeof(dma_addr_t) * lp->rx_ring_size,
+ GFP_ATOMIC);
+ if (!lp->rx_dma_addr) {
+ if (pcnet32_debug & NETIF_MSG_DRV)
+ printk("\n" KERN_ERR PFX
+ "%s: Memory allocation failed.\n", name);
+ return -ENOMEM;
+ }
+ memset(lp->rx_dma_addr, 0, sizeof(dma_addr_t) * lp->rx_ring_size);
+
+ lp->tx_skbuff = kmalloc(sizeof(struct sk_buff *) * lp->tx_ring_size,
+ GFP_ATOMIC);
+ if (!lp->tx_skbuff) {
+ if (pcnet32_debug & NETIF_MSG_DRV)
+ printk("\n" KERN_ERR PFX
+ "%s: Memory allocation failed.\n", name);
+ return -ENOMEM;
+ }
+ memset(lp->tx_skbuff, 0, sizeof(struct sk_buff *) * lp->tx_ring_size);
+
+ lp->rx_skbuff = kmalloc(sizeof(struct sk_buff *) * lp->rx_ring_size,
+ GFP_ATOMIC);
+ if (!lp->rx_skbuff) {
+ if (pcnet32_debug & NETIF_MSG_DRV)
+ printk("\n" KERN_ERR PFX
+ "%s: Memory allocation failed.\n", name);
+ return -ENOMEM;
+ }
+ memset(lp->rx_skbuff, 0, sizeof(struct sk_buff *) * lp->rx_ring_size);
+
+ return 0;
+}
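
The rings allocated above are always power-of-two sized because the chip learns their lengths from log2 exponents packed into the init block's tlen_rlen word: tx_len_bits in bits 12-15, rx_len_bits in bits 4-7. A small sketch of that packing, assuming a log2 size of 4 for both rings (the real defaults come from PCNET32_LOG_TX_BUFFERS/PCNET32_LOG_RX_BUFFERS):

#include <stdio.h>
#include <stdint.h>

#define LOG_TX_BUFFERS 4        /* assumed: 16 Tx descriptors */
#define LOG_RX_BUFFERS 4        /* assumed: 16 Rx descriptors */

int main(void)
{
        uint16_t tx_len_bits = LOG_TX_BUFFERS << 12;    /* TLEN, bits 12-15 */
        uint16_t rx_len_bits = LOG_RX_BUFFERS << 4;     /* RLEN, bits 4-7   */
        uint16_t tlen_rlen = tx_len_bits | rx_len_bits;

        printf("tlen_rlen = %#06x -> %d Tx / %d Rx descriptors\n",
               tlen_rlen, 1 << LOG_TX_BUFFERS, 1 << LOG_RX_BUFFERS);
        return 0;
}
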
static void pcnet32_free_ring(struct net_device *dev)
{
- struct pcnet32_private *lp = dev->priv;
+ struct pcnet32_private *lp = dev->priv;
- kfree(lp->tx_skbuff);
- lp->tx_skbuff = NULL;
+ kfree(lp->tx_skbuff);
+ lp->tx_skbuff = NULL;
- kfree(lp->rx_skbuff);
- lp->rx_skbuff = NULL;
+ kfree(lp->rx_skbuff);
+ lp->rx_skbuff = NULL;
- kfree(lp->tx_dma_addr);
- lp->tx_dma_addr = NULL;
+ kfree(lp->tx_dma_addr);
+ lp->tx_dma_addr = NULL;
- kfree(lp->rx_dma_addr);
- lp->rx_dma_addr = NULL;
+ kfree(lp->rx_dma_addr);
+ lp->rx_dma_addr = NULL;
- if (lp->tx_ring) {
- pci_free_consistent(lp->pci_dev, sizeof(struct pcnet32_tx_head) * lp->tx_ring_size,
- lp->tx_ring, lp->tx_ring_dma_addr);
- lp->tx_ring = NULL;
- }
+ if (lp->tx_ring) {
+ pci_free_consistent(lp->pci_dev,
+ sizeof(struct pcnet32_tx_head) *
+ lp->tx_ring_size, lp->tx_ring,
+ lp->tx_ring_dma_addr);
+ lp->tx_ring = NULL;
+ }
- if (lp->rx_ring) {
- pci_free_consistent(lp->pci_dev, sizeof(struct pcnet32_rx_head) * lp->rx_ring_size,
- lp->rx_ring, lp->rx_ring_dma_addr);
- lp->rx_ring = NULL;
- }
+ if (lp->rx_ring) {
+ pci_free_consistent(lp->pci_dev,
+ sizeof(struct pcnet32_rx_head) *
+ lp->rx_ring_size, lp->rx_ring,
+ lp->rx_ring_dma_addr);
+ lp->rx_ring = NULL;
+ }
}
-
-static int
-pcnet32_open(struct net_device *dev)
+static int pcnet32_open(struct net_device *dev)
{
- struct pcnet32_private *lp = dev->priv;
- unsigned long ioaddr = dev->base_addr;
- u16 val;
- int i;
- int rc;
- unsigned long flags;
-
- if (request_irq(dev->irq, &pcnet32_interrupt,
- lp->shared_irq ? SA_SHIRQ : 0, dev->name, (void *)dev)) {
- return -EAGAIN;
- }
-
- spin_lock_irqsave(&lp->lock, flags);
- /* Check for a valid station address */
- if (!is_valid_ether_addr(dev->dev_addr)) {
- rc = -EINVAL;
- goto err_free_irq;
- }
-
- /* Reset the PCNET32 */
- lp->a.reset (ioaddr);
-
- /* switch pcnet32 to 32bit mode */
- lp->a.write_bcr (ioaddr, 20, 2);
-
- if (netif_msg_ifup(lp))
- printk(KERN_DEBUG "%s: pcnet32_open() irq %d tx/rx rings %#x/%#x init %#x.\n",
- dev->name, dev->irq,
- (u32) (lp->tx_ring_dma_addr),
- (u32) (lp->rx_ring_dma_addr),
- (u32) (lp->dma_addr + offsetof(struct pcnet32_private, init_block)));
-
- /* set/reset autoselect bit */
- val = lp->a.read_bcr (ioaddr, 2) & ~2;
- if (lp->options & PCNET32_PORT_ASEL)
- val |= 2;
- lp->a.write_bcr (ioaddr, 2, val);
-
- /* handle full duplex setting */
- if (lp->mii_if.full_duplex) {
- val = lp->a.read_bcr (ioaddr, 9) & ~3;
- if (lp->options & PCNET32_PORT_FD) {
- val |= 1;
- if (lp->options == (PCNET32_PORT_FD | PCNET32_PORT_AUI))
+ struct pcnet32_private *lp = dev->priv;
+ unsigned long ioaddr = dev->base_addr;
+ u16 val;
+ int i;
+ int rc;
+ unsigned long flags;
+
+ if (request_irq(dev->irq, &pcnet32_interrupt,
+ lp->shared_irq ? SA_SHIRQ : 0, dev->name,
+ (void *)dev)) {
+ return -EAGAIN;
+ }
+
+ spin_lock_irqsave(&lp->lock, flags);
+ /* Check for a valid station address */
+ if (!is_valid_ether_addr(dev->dev_addr)) {
+ rc = -EINVAL;
+ goto err_free_irq;
+ }
+
+ /* Reset the PCNET32 */
+ lp->a.reset(ioaddr);
+
+ /* switch pcnet32 to 32bit mode */
+ lp->a.write_bcr(ioaddr, 20, 2);
+
+ if (netif_msg_ifup(lp))
+ printk(KERN_DEBUG
+ "%s: pcnet32_open() irq %d tx/rx rings %#x/%#x init %#x.\n",
+ dev->name, dev->irq, (u32) (lp->tx_ring_dma_addr),
+ (u32) (lp->rx_ring_dma_addr),
+ (u32) (lp->dma_addr +
+ offsetof(struct pcnet32_private, init_block)));
+
+ /* set/reset autoselect bit */
+ val = lp->a.read_bcr(ioaddr, 2) & ~2;
+ if (lp->options & PCNET32_PORT_ASEL)
val |= 2;
- } else if (lp->options & PCNET32_PORT_ASEL) {
- /* workaround of xSeries250, turn on for 79C975 only */
- i = ((lp->a.read_csr(ioaddr, 88) |
- (lp->a.read_csr(ioaddr,89) << 16)) >> 12) & 0xffff;
- if (i == 0x2627)
- val |= 3;
- }
- lp->a.write_bcr (ioaddr, 9, val);
- }
-
- /* set/reset GPSI bit in test register */
- val = lp->a.read_csr (ioaddr, 124) & ~0x10;
- if ((lp->options & PCNET32_PORT_PORTSEL) == PCNET32_PORT_GPSI)
- val |= 0x10;
- lp->a.write_csr (ioaddr, 124, val);
-
- /* Allied Telesyn AT 2700/2701 FX are 100Mbit only and do not negotiate */
- if (lp->pci_dev->subsystem_vendor == PCI_VENDOR_ID_AT &&
+ lp->a.write_bcr(ioaddr, 2, val);
+
+ /* handle full duplex setting */
+ if (lp->mii_if.full_duplex) {
+ val = lp->a.read_bcr(ioaddr, 9) & ~3;
+ if (lp->options & PCNET32_PORT_FD) {
+ val |= 1;
+ if (lp->options == (PCNET32_PORT_FD | PCNET32_PORT_AUI))
+ val |= 2;
+ } else if (lp->options & PCNET32_PORT_ASEL) {
+				/* workaround for xSeries250, turn on for 79C975 only */
+				i = ((lp->a.read_csr(ioaddr, 88) |
+				      (lp->a.read_csr(ioaddr, 89) << 16)) >> 12) & 0xffff;
+ if (i == 0x2627)
+ val |= 3;
+ }
+ lp->a.write_bcr(ioaddr, 9, val);
+ }
+
+ /* set/reset GPSI bit in test register */
+ val = lp->a.read_csr(ioaddr, 124) & ~0x10;
+ if ((lp->options & PCNET32_PORT_PORTSEL) == PCNET32_PORT_GPSI)
+ val |= 0x10;
+ lp->a.write_csr(ioaddr, 124, val);
+
+ /* Allied Telesyn AT 2700/2701 FX are 100Mbit only and do not negotiate */
+ if (lp->pci_dev->subsystem_vendor == PCI_VENDOR_ID_AT &&
(lp->pci_dev->subsystem_device == PCI_SUBDEVICE_ID_AT_2700FX ||
lp->pci_dev->subsystem_device == PCI_SUBDEVICE_ID_AT_2701FX)) {
- if (lp->options & PCNET32_PORT_ASEL) {
- lp->options = PCNET32_PORT_FD | PCNET32_PORT_100;
- if (netif_msg_link(lp))
- printk(KERN_DEBUG "%s: Setting 100Mb-Full Duplex.\n",
- dev->name);
- }
- }
- {
- /*
- * 24 Jun 2004 according AMD, in order to change the PHY,
- * DANAS (or DISPM for 79C976) must be set; then select the speed,
- * duplex, and/or enable auto negotiation, and clear DANAS
- */
- if (lp->mii && !(lp->options & PCNET32_PORT_ASEL)) {
- lp->a.write_bcr(ioaddr, 32,
- lp->a.read_bcr(ioaddr, 32) | 0x0080);
- /* disable Auto Negotiation, set 10Mpbs, HD */
- val = lp->a.read_bcr(ioaddr, 32) & ~0xb8;
- if (lp->options & PCNET32_PORT_FD)
- val |= 0x10;
- if (lp->options & PCNET32_PORT_100)
- val |= 0x08;
- lp->a.write_bcr (ioaddr, 32, val);
+ if (lp->options & PCNET32_PORT_ASEL) {
+ lp->options = PCNET32_PORT_FD | PCNET32_PORT_100;
+ if (netif_msg_link(lp))
+ printk(KERN_DEBUG
+ "%s: Setting 100Mb-Full Duplex.\n",
+ dev->name);
+ }
+ }
+ if (lp->phycount < 2) {
+ /*
+		 * 24 Jun 2004 according to AMD, in order to change the PHY,
+ * DANAS (or DISPM for 79C976) must be set; then select the speed,
+ * duplex, and/or enable auto negotiation, and clear DANAS
+ */
+ if (lp->mii && !(lp->options & PCNET32_PORT_ASEL)) {
+ lp->a.write_bcr(ioaddr, 32,
+ lp->a.read_bcr(ioaddr, 32) | 0x0080);
+			/* disable Auto Negotiation, set 10Mbps, HD */
+ val = lp->a.read_bcr(ioaddr, 32) & ~0xb8;
+ if (lp->options & PCNET32_PORT_FD)
+ val |= 0x10;
+ if (lp->options & PCNET32_PORT_100)
+ val |= 0x08;
+ lp->a.write_bcr(ioaddr, 32, val);
+ } else {
+ if (lp->options & PCNET32_PORT_ASEL) {
+ lp->a.write_bcr(ioaddr, 32,
+						lp->a.read_bcr(ioaddr, 32) | 0x0080);
+ /* enable auto negotiate, setup, disable fd */
+ val = lp->a.read_bcr(ioaddr, 32) & ~0x98;
+ val |= 0x20;
+ lp->a.write_bcr(ioaddr, 32, val);
+ }
+ }
} else {
- if (lp->options & PCNET32_PORT_ASEL) {
- lp->a.write_bcr(ioaddr, 32,
- lp->a.read_bcr(ioaddr, 32) | 0x0080);
- /* enable auto negotiate, setup, disable fd */
- val = lp->a.read_bcr(ioaddr, 32) & ~0x98;
- val |= 0x20;
- lp->a.write_bcr(ioaddr, 32, val);
- }
+ int first_phy = -1;
+ u16 bmcr;
+ u32 bcr9;
+ struct ethtool_cmd ecmd;
+
+ /*
+		 * There is really no good way to handle multiple PHYs
+		 * other than turning off all the automatics
+ */
+ val = lp->a.read_bcr(ioaddr, 2);
+ lp->a.write_bcr(ioaddr, 2, val & ~2);
+ val = lp->a.read_bcr(ioaddr, 32);
+ lp->a.write_bcr(ioaddr, 32, val & ~(1 << 7)); /* stop MII manager */
+
+ if (!(lp->options & PCNET32_PORT_ASEL)) {
+ /* setup ecmd */
+ ecmd.port = PORT_MII;
+ ecmd.transceiver = XCVR_INTERNAL;
+ ecmd.autoneg = AUTONEG_DISABLE;
+			ecmd.speed = (lp->options & PCNET32_PORT_100) ?
+			    SPEED_100 : SPEED_10;
+ bcr9 = lp->a.read_bcr(ioaddr, 9);
+
+ if (lp->options & PCNET32_PORT_FD) {
+ ecmd.duplex = DUPLEX_FULL;
+ bcr9 |= (1 << 0);
+ } else {
+ ecmd.duplex = DUPLEX_HALF;
+				bcr9 &= ~(1 << 0);
+ }
+ lp->a.write_bcr(ioaddr, 9, bcr9);
+ }
+
+ for (i = 0; i < PCNET32_MAX_PHYS; i++) {
+ if (lp->phymask & (1 << i)) {
+ /* isolate all but the first PHY */
+ bmcr = mdio_read(dev, i, MII_BMCR);
+ if (first_phy == -1) {
+ first_phy = i;
+ mdio_write(dev, i, MII_BMCR,
+ bmcr & ~BMCR_ISOLATE);
+ } else {
+ mdio_write(dev, i, MII_BMCR,
+ bmcr | BMCR_ISOLATE);
+ }
+ /* use mii_ethtool_sset to setup PHY */
+ lp->mii_if.phy_id = i;
+ ecmd.phy_address = i;
+ if (lp->options & PCNET32_PORT_ASEL) {
+ mii_ethtool_gset(&lp->mii_if, &ecmd);
+ ecmd.autoneg = AUTONEG_ENABLE;
+ }
+ mii_ethtool_sset(&lp->mii_if, &ecmd);
+ }
+ }
+ lp->mii_if.phy_id = first_phy;
+ if (netif_msg_link(lp))
+ printk(KERN_INFO "%s: Using PHY number %d.\n",
+ dev->name, first_phy);
}
- }
#ifdef DO_DXSUFLO
- if (lp->dxsuflo) { /* Disable transmit stop on underflow */
- val = lp->a.read_csr (ioaddr, 3);
- val |= 0x40;
- lp->a.write_csr (ioaddr, 3, val);
- }
+ if (lp->dxsuflo) { /* Disable transmit stop on underflow */
+ val = lp->a.read_csr(ioaddr, 3);
+ val |= 0x40;
+ lp->a.write_csr(ioaddr, 3, val);
+ }
#endif
- lp->init_block.mode = le16_to_cpu((lp->options & PCNET32_PORT_PORTSEL) << 7);
- pcnet32_load_multicast(dev);
-
- if (pcnet32_init_ring(dev)) {
- rc = -ENOMEM;
- goto err_free_ring;
- }
-
- /* Re-initialize the PCNET32, and start it when done. */
- lp->a.write_csr (ioaddr, 1, (lp->dma_addr +
- offsetof(struct pcnet32_private, init_block)) & 0xffff);
- lp->a.write_csr (ioaddr, 2, (lp->dma_addr +
- offsetof(struct pcnet32_private, init_block)) >> 16);
-
- lp->a.write_csr (ioaddr, 4, 0x0915);
- lp->a.write_csr (ioaddr, 0, 0x0001);
-
- netif_start_queue(dev);
-
- /* If we have mii, print the link status and start the watchdog */
- if (lp->mii) {
- mii_check_media (&lp->mii_if, netif_msg_link(lp), 1);
- mod_timer (&(lp->watchdog_timer), PCNET32_WATCHDOG_TIMEOUT);
- }
-
- i = 0;
- while (i++ < 100)
- if (lp->a.read_csr (ioaddr, 0) & 0x0100)
- break;
- /*
- * We used to clear the InitDone bit, 0x0100, here but Mark Stockton
- * reports that doing so triggers a bug in the '974.
- */
- lp->a.write_csr (ioaddr, 0, 0x0042);
-
- if (netif_msg_ifup(lp))
- printk(KERN_DEBUG "%s: pcnet32 open after %d ticks, init block %#x csr0 %4.4x.\n",
- dev->name, i, (u32) (lp->dma_addr +
- offsetof(struct pcnet32_private, init_block)),
- lp->a.read_csr(ioaddr, 0));
-
- spin_unlock_irqrestore(&lp->lock, flags);
-
- return 0; /* Always succeed */
-
-err_free_ring:
- /* free any allocated skbuffs */
- for (i = 0; i < lp->rx_ring_size; i++) {
- lp->rx_ring[i].status = 0;
- if (lp->rx_skbuff[i]) {
- pci_unmap_single(lp->pci_dev, lp->rx_dma_addr[i], PKT_BUF_SZ-2,
- PCI_DMA_FROMDEVICE);
- dev_kfree_skb(lp->rx_skbuff[i]);
- }
- lp->rx_skbuff[i] = NULL;
- lp->rx_dma_addr[i] = 0;
- }
-
- pcnet32_free_ring(dev);
-
- /*
- * Switch back to 16bit mode to avoid problems with dumb
- * DOS packet driver after a warm reboot
- */
- lp->a.write_bcr (ioaddr, 20, 4);
-
-err_free_irq:
- spin_unlock_irqrestore(&lp->lock, flags);
- free_irq(dev->irq, dev);
- return rc;
+ lp->init_block.mode =
+ le16_to_cpu((lp->options & PCNET32_PORT_PORTSEL) << 7);
+ pcnet32_load_multicast(dev);
+
+ if (pcnet32_init_ring(dev)) {
+ rc = -ENOMEM;
+ goto err_free_ring;
+ }
+
+ /* Re-initialize the PCNET32, and start it when done. */
+ lp->a.write_csr(ioaddr, 1, (lp->dma_addr +
+ offsetof(struct pcnet32_private,
+ init_block)) & 0xffff);
+ lp->a.write_csr(ioaddr, 2,
+ (lp->dma_addr +
+ offsetof(struct pcnet32_private, init_block)) >> 16);
+
+ lp->a.write_csr(ioaddr, 4, 0x0915);
+ lp->a.write_csr(ioaddr, 0, 0x0001);
+
+ netif_start_queue(dev);
+
+ /* Print the link status and start the watchdog */
+ pcnet32_check_media(dev, 1);
+ mod_timer(&(lp->watchdog_timer), PCNET32_WATCHDOG_TIMEOUT);
+
+ i = 0;
+ while (i++ < 100)
+ if (lp->a.read_csr(ioaddr, 0) & 0x0100)
+ break;
+ /*
+ * We used to clear the InitDone bit, 0x0100, here but Mark Stockton
+ * reports that doing so triggers a bug in the '974.
+ */
+ lp->a.write_csr(ioaddr, 0, 0x0042);
+
+ if (netif_msg_ifup(lp))
+ printk(KERN_DEBUG
+ "%s: pcnet32 open after %d ticks, init block %#x csr0 %4.4x.\n",
+ dev->name, i,
+ (u32) (lp->dma_addr +
+ offsetof(struct pcnet32_private, init_block)),
+ lp->a.read_csr(ioaddr, 0));
+
+ spin_unlock_irqrestore(&lp->lock, flags);
+
+ return 0; /* Always succeed */
+
+ err_free_ring:
+ /* free any allocated skbuffs */
+ for (i = 0; i < lp->rx_ring_size; i++) {
+ lp->rx_ring[i].status = 0;
+ if (lp->rx_skbuff[i]) {
+ pci_unmap_single(lp->pci_dev, lp->rx_dma_addr[i],
+ PKT_BUF_SZ - 2, PCI_DMA_FROMDEVICE);
+ dev_kfree_skb(lp->rx_skbuff[i]);
+ }
+ lp->rx_skbuff[i] = NULL;
+ lp->rx_dma_addr[i] = 0;
+ }
+
+ pcnet32_free_ring(dev);
+
+ /*
+ * Switch back to 16bit mode to avoid problems with dumb
+ * DOS packet driver after a warm reboot
+ */
+ lp->a.write_bcr(ioaddr, 20, 4);
+
+ err_free_irq:
+ spin_unlock_irqrestore(&lp->lock, flags);
+ free_irq(dev->irq, dev);
+ return rc;
}
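
Both the probe path and pcnet32_open hand the init block's bus address to the chip as two halves: the low word is written to CSR1 and the high word to CSR2. A minimal sketch of that split, using an arbitrary made-up address:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
        uint32_t init_dma = 0x3ffe2a40; /* made-up init block bus address */

        uint16_t csr1 = init_dma & 0xffff;      /* low 16 bits  -> CSR1 */
        uint16_t csr2 = init_dma >> 16;         /* high 16 bits -> CSR2 */

        printf("CSR1 = %#06x, CSR2 = %#06x\n", csr1, csr2);
        return 0;
}
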
/*
@@ -1746,727 +1801,893 @@ err_free_irq:
* restarting the chip, but I'm too lazy to do so right now. dplatt@3do.com
*/
-static void
-pcnet32_purge_tx_ring(struct net_device *dev)
+static void pcnet32_purge_tx_ring(struct net_device *dev)
{
- struct pcnet32_private *lp = dev->priv;
- int i;
-
- for (i = 0; i < lp->tx_ring_size; i++) {
- lp->tx_ring[i].status = 0; /* CPU owns buffer */
- wmb(); /* Make sure adapter sees owner change */
- if (lp->tx_skbuff[i]) {
- pci_unmap_single(lp->pci_dev, lp->tx_dma_addr[i],
- lp->tx_skbuff[i]->len, PCI_DMA_TODEVICE);
- dev_kfree_skb_any(lp->tx_skbuff[i]);
- }
- lp->tx_skbuff[i] = NULL;
- lp->tx_dma_addr[i] = 0;
- }
-}
+ struct pcnet32_private *lp = dev->priv;
+ int i;
+ for (i = 0; i < lp->tx_ring_size; i++) {
+ lp->tx_ring[i].status = 0; /* CPU owns buffer */
+ wmb(); /* Make sure adapter sees owner change */
+ if (lp->tx_skbuff[i]) {
+ pci_unmap_single(lp->pci_dev, lp->tx_dma_addr[i],
+ lp->tx_skbuff[i]->len,
+ PCI_DMA_TODEVICE);
+ dev_kfree_skb_any(lp->tx_skbuff[i]);
+ }
+ lp->tx_skbuff[i] = NULL;
+ lp->tx_dma_addr[i] = 0;
+ }
+}
/* Initialize the PCNET32 Rx and Tx rings. */
-static int
-pcnet32_init_ring(struct net_device *dev)
+static int pcnet32_init_ring(struct net_device *dev)
{
- struct pcnet32_private *lp = dev->priv;
- int i;
-
- lp->tx_full = 0;
- lp->cur_rx = lp->cur_tx = 0;
- lp->dirty_rx = lp->dirty_tx = 0;
-
- for (i = 0; i < lp->rx_ring_size; i++) {
- struct sk_buff *rx_skbuff = lp->rx_skbuff[i];
- if (rx_skbuff == NULL) {
- if (!(rx_skbuff = lp->rx_skbuff[i] = dev_alloc_skb (PKT_BUF_SZ))) {
- /* there is not much, we can do at this point */
- if (pcnet32_debug & NETIF_MSG_DRV)
- printk(KERN_ERR "%s: pcnet32_init_ring dev_alloc_skb failed.\n",
- dev->name);
- return -1;
- }
- skb_reserve (rx_skbuff, 2);
- }
-
- rmb();
- if (lp->rx_dma_addr[i] == 0)
- lp->rx_dma_addr[i] = pci_map_single(lp->pci_dev, rx_skbuff->data,
- PKT_BUF_SZ-2, PCI_DMA_FROMDEVICE);
- lp->rx_ring[i].base = (u32)le32_to_cpu(lp->rx_dma_addr[i]);
- lp->rx_ring[i].buf_length = le16_to_cpu(2-PKT_BUF_SZ);
- wmb(); /* Make sure owner changes after all others are visible */
- lp->rx_ring[i].status = le16_to_cpu(0x8000);
- }
- /* The Tx buffer address is filled in as needed, but we do need to clear
- * the upper ownership bit. */
- for (i = 0; i < lp->tx_ring_size; i++) {
- lp->tx_ring[i].status = 0; /* CPU owns buffer */
- wmb(); /* Make sure adapter sees owner change */
- lp->tx_ring[i].base = 0;
- lp->tx_dma_addr[i] = 0;
- }
-
- lp->init_block.tlen_rlen = le16_to_cpu(lp->tx_len_bits | lp->rx_len_bits);
- for (i = 0; i < 6; i++)
- lp->init_block.phys_addr[i] = dev->dev_addr[i];
- lp->init_block.rx_ring = (u32)le32_to_cpu(lp->rx_ring_dma_addr);
- lp->init_block.tx_ring = (u32)le32_to_cpu(lp->tx_ring_dma_addr);
- wmb(); /* Make sure all changes are visible */
- return 0;
+ struct pcnet32_private *lp = dev->priv;
+ int i;
+
+ lp->tx_full = 0;
+ lp->cur_rx = lp->cur_tx = 0;
+ lp->dirty_rx = lp->dirty_tx = 0;
+
+ for (i = 0; i < lp->rx_ring_size; i++) {
+ struct sk_buff *rx_skbuff = lp->rx_skbuff[i];
+ if (rx_skbuff == NULL) {
+			if (!(rx_skbuff = lp->rx_skbuff[i] =
+			      dev_alloc_skb(PKT_BUF_SZ))) {
+				/* there is not much we can do at this point */
+ if (pcnet32_debug & NETIF_MSG_DRV)
+ printk(KERN_ERR
+ "%s: pcnet32_init_ring dev_alloc_skb failed.\n",
+ dev->name);
+ return -1;
+ }
+ skb_reserve(rx_skbuff, 2);
+ }
+
+ rmb();
+ if (lp->rx_dma_addr[i] == 0)
+ lp->rx_dma_addr[i] =
+ pci_map_single(lp->pci_dev, rx_skbuff->data,
+ PKT_BUF_SZ - 2, PCI_DMA_FROMDEVICE);
+ lp->rx_ring[i].base = (u32) le32_to_cpu(lp->rx_dma_addr[i]);
+ lp->rx_ring[i].buf_length = le16_to_cpu(2 - PKT_BUF_SZ);
+ wmb(); /* Make sure owner changes after all others are visible */
+ lp->rx_ring[i].status = le16_to_cpu(0x8000);
+ }
+ /* The Tx buffer address is filled in as needed, but we do need to clear
+ * the upper ownership bit. */
+ for (i = 0; i < lp->tx_ring_size; i++) {
+ lp->tx_ring[i].status = 0; /* CPU owns buffer */
+ wmb(); /* Make sure adapter sees owner change */
+ lp->tx_ring[i].base = 0;
+ lp->tx_dma_addr[i] = 0;
+ }
+
+ lp->init_block.tlen_rlen =
+ le16_to_cpu(lp->tx_len_bits | lp->rx_len_bits);
+ for (i = 0; i < 6; i++)
+ lp->init_block.phys_addr[i] = dev->dev_addr[i];
+ lp->init_block.rx_ring = (u32) le32_to_cpu(lp->rx_ring_dma_addr);
+ lp->init_block.tx_ring = (u32) le32_to_cpu(lp->tx_ring_dma_addr);
+ wmb(); /* Make sure all changes are visible */
+ return 0;
}
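
The descriptors store buffer lengths as negative (two's-complement) 16-bit values, which is why pcnet32_init_ring writes 2 - PKT_BUF_SZ and the tx_timeout ring dump recovers sizes with (-length) & 0xffff. A small sketch of that encoding, assuming the driver's usual PKT_BUF_SZ of 1544:

#include <stdio.h>
#include <stdint.h>

#define PKT_BUF_SZ 1544         /* assumed; matches the driver default */

int main(void)
{
        /* field as the chip sees it: negative of the mapped buffer size */
        uint16_t buf_length = (uint16_t)(2 - PKT_BUF_SZ);

        /* how the debug dump recovers the real size */
        unsigned int real = (-(int16_t)buf_length) & 0xffff;

        printf("encoded %#06x, decoded %u bytes (PKT_BUF_SZ - 2)\n",
               buf_length, real);
        return 0;
}
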
/* the pcnet32 has been issued a stop or reset. Wait for the stop bit
* then flush the pending transmit operations, re-initialize the ring,
* and tell the chip to initialize.
*/
-static void
-pcnet32_restart(struct net_device *dev, unsigned int csr0_bits)
+static void pcnet32_restart(struct net_device *dev, unsigned int csr0_bits)
{
- struct pcnet32_private *lp = dev->priv;
- unsigned long ioaddr = dev->base_addr;
- int i;
+ struct pcnet32_private *lp = dev->priv;
+ unsigned long ioaddr = dev->base_addr;
+ int i;
- /* wait for stop */
- for (i=0; i<100; i++)
- if (lp->a.read_csr(ioaddr, 0) & 0x0004)
- break;
+ /* wait for stop */
+ for (i = 0; i < 100; i++)
+ if (lp->a.read_csr(ioaddr, 0) & 0x0004)
+ break;
- if (i >= 100 && netif_msg_drv(lp))
- printk(KERN_ERR "%s: pcnet32_restart timed out waiting for stop.\n",
- dev->name);
+ if (i >= 100 && netif_msg_drv(lp))
+ printk(KERN_ERR
+ "%s: pcnet32_restart timed out waiting for stop.\n",
+ dev->name);
- pcnet32_purge_tx_ring(dev);
- if (pcnet32_init_ring(dev))
- return;
+ pcnet32_purge_tx_ring(dev);
+ if (pcnet32_init_ring(dev))
+ return;
- /* ReInit Ring */
- lp->a.write_csr (ioaddr, 0, 1);
- i = 0;
- while (i++ < 1000)
- if (lp->a.read_csr (ioaddr, 0) & 0x0100)
- break;
+ /* ReInit Ring */
+ lp->a.write_csr(ioaddr, 0, 1);
+ i = 0;
+ while (i++ < 1000)
+ if (lp->a.read_csr(ioaddr, 0) & 0x0100)
+ break;
- lp->a.write_csr (ioaddr, 0, csr0_bits);
+ lp->a.write_csr(ioaddr, 0, csr0_bits);
}
-
-static void
-pcnet32_tx_timeout (struct net_device *dev)
+static void pcnet32_tx_timeout(struct net_device *dev)
{
- struct pcnet32_private *lp = dev->priv;
- unsigned long ioaddr = dev->base_addr, flags;
-
- spin_lock_irqsave(&lp->lock, flags);
- /* Transmitter timeout, serious problems. */
- if (pcnet32_debug & NETIF_MSG_DRV)
- printk(KERN_ERR "%s: transmit timed out, status %4.4x, resetting.\n",
- dev->name, lp->a.read_csr(ioaddr, 0));
- lp->a.write_csr (ioaddr, 0, 0x0004);
- lp->stats.tx_errors++;
- if (netif_msg_tx_err(lp)) {
- int i;
- printk(KERN_DEBUG " Ring data dump: dirty_tx %d cur_tx %d%s cur_rx %d.",
- lp->dirty_tx, lp->cur_tx, lp->tx_full ? " (full)" : "",
- lp->cur_rx);
- for (i = 0 ; i < lp->rx_ring_size; i++)
- printk("%s %08x %04x %08x %04x", i & 1 ? "" : "\n ",
- le32_to_cpu(lp->rx_ring[i].base),
- (-le16_to_cpu(lp->rx_ring[i].buf_length)) & 0xffff,
- le32_to_cpu(lp->rx_ring[i].msg_length),
- le16_to_cpu(lp->rx_ring[i].status));
- for (i = 0 ; i < lp->tx_ring_size; i++)
- printk("%s %08x %04x %08x %04x", i & 1 ? "" : "\n ",
- le32_to_cpu(lp->tx_ring[i].base),
- (-le16_to_cpu(lp->tx_ring[i].length)) & 0xffff,
- le32_to_cpu(lp->tx_ring[i].misc),
- le16_to_cpu(lp->tx_ring[i].status));
- printk("\n");
- }
- pcnet32_restart(dev, 0x0042);
-
- dev->trans_start = jiffies;
- netif_wake_queue(dev);
-
- spin_unlock_irqrestore(&lp->lock, flags);
-}
+ struct pcnet32_private *lp = dev->priv;
+ unsigned long ioaddr = dev->base_addr, flags;
+
+ spin_lock_irqsave(&lp->lock, flags);
+ /* Transmitter timeout, serious problems. */
+ if (pcnet32_debug & NETIF_MSG_DRV)
+ printk(KERN_ERR
+ "%s: transmit timed out, status %4.4x, resetting.\n",
+ dev->name, lp->a.read_csr(ioaddr, 0));
+ lp->a.write_csr(ioaddr, 0, 0x0004);
+ lp->stats.tx_errors++;
+ if (netif_msg_tx_err(lp)) {
+ int i;
+ printk(KERN_DEBUG
+ " Ring data dump: dirty_tx %d cur_tx %d%s cur_rx %d.",
+ lp->dirty_tx, lp->cur_tx, lp->tx_full ? " (full)" : "",
+ lp->cur_rx);
+ for (i = 0; i < lp->rx_ring_size; i++)
+ printk("%s %08x %04x %08x %04x", i & 1 ? "" : "\n ",
+ le32_to_cpu(lp->rx_ring[i].base),
+ (-le16_to_cpu(lp->rx_ring[i].buf_length)) &
+ 0xffff, le32_to_cpu(lp->rx_ring[i].msg_length),
+ le16_to_cpu(lp->rx_ring[i].status));
+ for (i = 0; i < lp->tx_ring_size; i++)
+ printk("%s %08x %04x %08x %04x", i & 1 ? "" : "\n ",
+ le32_to_cpu(lp->tx_ring[i].base),
+ (-le16_to_cpu(lp->tx_ring[i].length)) & 0xffff,
+ le32_to_cpu(lp->tx_ring[i].misc),
+ le16_to_cpu(lp->tx_ring[i].status));
+ printk("\n");
+ }
+ pcnet32_restart(dev, 0x0042);
+
+ dev->trans_start = jiffies;
+ netif_wake_queue(dev);
+ spin_unlock_irqrestore(&lp->lock, flags);
+}
-static int
-pcnet32_start_xmit(struct sk_buff *skb, struct net_device *dev)
+static int pcnet32_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
- struct pcnet32_private *lp = dev->priv;
- unsigned long ioaddr = dev->base_addr;
- u16 status;
- int entry;
- unsigned long flags;
+ struct pcnet32_private *lp = dev->priv;
+ unsigned long ioaddr = dev->base_addr;
+ u16 status;
+ int entry;
+ unsigned long flags;
- spin_lock_irqsave(&lp->lock, flags);
+ spin_lock_irqsave(&lp->lock, flags);
- if (netif_msg_tx_queued(lp)) {
- printk(KERN_DEBUG "%s: pcnet32_start_xmit() called, csr0 %4.4x.\n",
- dev->name, lp->a.read_csr(ioaddr, 0));
- }
+ if (netif_msg_tx_queued(lp)) {
+ printk(KERN_DEBUG
+ "%s: pcnet32_start_xmit() called, csr0 %4.4x.\n",
+ dev->name, lp->a.read_csr(ioaddr, 0));
+ }
- /* Default status -- will not enable Successful-TxDone
- * interrupt when that option is available to us.
- */
- status = 0x8300;
+ /* Default status -- will not enable Successful-TxDone
+ * interrupt when that option is available to us.
+ */
+ status = 0x8300;
- /* Fill in a Tx ring entry */
+ /* Fill in a Tx ring entry */
- /* Mask to ring buffer boundary. */
- entry = lp->cur_tx & lp->tx_mod_mask;
+ /* Mask to ring buffer boundary. */
+ entry = lp->cur_tx & lp->tx_mod_mask;
- /* Caution: the write order is important here, set the status
- * with the "ownership" bits last. */
+ /* Caution: the write order is important here, set the status
+ * with the "ownership" bits last. */
- lp->tx_ring[entry].length = le16_to_cpu(-skb->len);
+ lp->tx_ring[entry].length = le16_to_cpu(-skb->len);
- lp->tx_ring[entry].misc = 0x00000000;
+ lp->tx_ring[entry].misc = 0x00000000;
- lp->tx_skbuff[entry] = skb;
- lp->tx_dma_addr[entry] = pci_map_single(lp->pci_dev, skb->data, skb->len,
- PCI_DMA_TODEVICE);
- lp->tx_ring[entry].base = (u32)le32_to_cpu(lp->tx_dma_addr[entry]);
- wmb(); /* Make sure owner changes after all others are visible */
- lp->tx_ring[entry].status = le16_to_cpu(status);
+ lp->tx_skbuff[entry] = skb;
+ lp->tx_dma_addr[entry] =
+ pci_map_single(lp->pci_dev, skb->data, skb->len, PCI_DMA_TODEVICE);
+ lp->tx_ring[entry].base = (u32) le32_to_cpu(lp->tx_dma_addr[entry]);
+ wmb(); /* Make sure owner changes after all others are visible */
+ lp->tx_ring[entry].status = le16_to_cpu(status);
- lp->cur_tx++;
- lp->stats.tx_bytes += skb->len;
+ lp->cur_tx++;
+ lp->stats.tx_bytes += skb->len;
- /* Trigger an immediate send poll. */
- lp->a.write_csr (ioaddr, 0, 0x0048);
+ /* Trigger an immediate send poll. */
+ lp->a.write_csr(ioaddr, 0, 0x0048);
- dev->trans_start = jiffies;
+ dev->trans_start = jiffies;
- if (lp->tx_ring[(entry+1) & lp->tx_mod_mask].base != 0) {
- lp->tx_full = 1;
- netif_stop_queue(dev);
- }
- spin_unlock_irqrestore(&lp->lock, flags);
- return 0;
+ if (lp->tx_ring[(entry + 1) & lp->tx_mod_mask].base != 0) {
+ lp->tx_full = 1;
+ netif_stop_queue(dev);
+ }
+ spin_unlock_irqrestore(&lp->lock, flags);
+ return 0;
}
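
The transmit (and receive) paths index their rings through a power-of-two mask, tx_mod_mask = tx_ring_size - 1, so cur_tx can be a free-running counter and "entry + 1" naturally wraps when checking whether the ring is full. A minimal sketch of that wrap-around arithmetic with an assumed 16-entry ring:

#include <stdio.h>

#define TX_RING_SIZE 16                 /* assumed power-of-two ring size */
#define TX_MOD_MASK  (TX_RING_SIZE - 1)

int main(void)
{
        unsigned int cur_tx;

        /* a free-running counter maps onto ring slots 0..15 */
        for (cur_tx = 14; cur_tx < 19; cur_tx++) {
                unsigned int entry = cur_tx & TX_MOD_MASK;
                unsigned int next = (entry + 1) & TX_MOD_MASK;
                printf("cur_tx %2u -> entry %2u (next slot %2u)\n",
                       cur_tx, entry, next);
        }
        return 0;
}
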
/* The PCNET32 interrupt handler. */
static irqreturn_t
-pcnet32_interrupt(int irq, void *dev_id, struct pt_regs * regs)
+pcnet32_interrupt(int irq, void *dev_id, struct pt_regs *regs)
{
- struct net_device *dev = dev_id;
- struct pcnet32_private *lp;
- unsigned long ioaddr;
- u16 csr0,rap;
- int boguscnt = max_interrupt_work;
- int must_restart;
-
- if (!dev) {
- if (pcnet32_debug & NETIF_MSG_INTR)
- printk (KERN_DEBUG "%s(): irq %d for unknown device\n",
- __FUNCTION__, irq);
- return IRQ_NONE;
- }
-
- ioaddr = dev->base_addr;
- lp = dev->priv;
-
- spin_lock(&lp->lock);
-
- rap = lp->a.read_rap(ioaddr);
- while ((csr0 = lp->a.read_csr (ioaddr, 0)) & 0x8f00 && --boguscnt >= 0) {
- if (csr0 == 0xffff) {
- break; /* PCMCIA remove happened */
+ struct net_device *dev = dev_id;
+ struct pcnet32_private *lp;
+ unsigned long ioaddr;
+ u16 csr0, rap;
+ int boguscnt = max_interrupt_work;
+ int must_restart;
+
+ if (!dev) {
+ if (pcnet32_debug & NETIF_MSG_INTR)
+ printk(KERN_DEBUG "%s(): irq %d for unknown device\n",
+ __FUNCTION__, irq);
+ return IRQ_NONE;
}
- /* Acknowledge all of the current interrupt sources ASAP. */
- lp->a.write_csr (ioaddr, 0, csr0 & ~0x004f);
- must_restart = 0;
+ ioaddr = dev->base_addr;
+ lp = dev->priv;
- if (netif_msg_intr(lp))
- printk(KERN_DEBUG "%s: interrupt csr0=%#2.2x new csr=%#2.2x.\n",
- dev->name, csr0, lp->a.read_csr (ioaddr, 0));
-
- if (csr0 & 0x0400) /* Rx interrupt */
- pcnet32_rx(dev);
-
- if (csr0 & 0x0200) { /* Tx-done interrupt */
- unsigned int dirty_tx = lp->dirty_tx;
- int delta;
-
- while (dirty_tx != lp->cur_tx) {
- int entry = dirty_tx & lp->tx_mod_mask;
- int status = (short)le16_to_cpu(lp->tx_ring[entry].status);
-
- if (status < 0)
- break; /* It still hasn't been Txed */
-
- lp->tx_ring[entry].base = 0;
-
- if (status & 0x4000) {
- /* There was an major error, log it. */
- int err_status = le32_to_cpu(lp->tx_ring[entry].misc);
- lp->stats.tx_errors++;
- if (netif_msg_tx_err(lp))
- printk(KERN_ERR "%s: Tx error status=%04x err_status=%08x\n",
- dev->name, status, err_status);
- if (err_status & 0x04000000) lp->stats.tx_aborted_errors++;
- if (err_status & 0x08000000) lp->stats.tx_carrier_errors++;
- if (err_status & 0x10000000) lp->stats.tx_window_errors++;
+ spin_lock(&lp->lock);
+
+ rap = lp->a.read_rap(ioaddr);
+ while ((csr0 = lp->a.read_csr(ioaddr, 0)) & 0x8f00 && --boguscnt >= 0) {
+ if (csr0 == 0xffff) {
+ break; /* PCMCIA remove happened */
+ }
+ /* Acknowledge all of the current interrupt sources ASAP. */
+ lp->a.write_csr(ioaddr, 0, csr0 & ~0x004f);
+
+ must_restart = 0;
+
+ if (netif_msg_intr(lp))
+ printk(KERN_DEBUG
+ "%s: interrupt csr0=%#2.2x new csr=%#2.2x.\n",
+ dev->name, csr0, lp->a.read_csr(ioaddr, 0));
+
+ if (csr0 & 0x0400) /* Rx interrupt */
+ pcnet32_rx(dev);
+
+ if (csr0 & 0x0200) { /* Tx-done interrupt */
+ unsigned int dirty_tx = lp->dirty_tx;
+ int delta;
+
+ while (dirty_tx != lp->cur_tx) {
+ int entry = dirty_tx & lp->tx_mod_mask;
+				int status =
+				    (short)le16_to_cpu(lp->tx_ring[entry].status);
+
+ if (status < 0)
+ break; /* It still hasn't been Txed */
+
+ lp->tx_ring[entry].base = 0;
+
+ if (status & 0x4000) {
+					/* There was a major error, log it. */
+					int err_status =
+					    le32_to_cpu(lp->tx_ring[entry].misc);
+ lp->stats.tx_errors++;
+ if (netif_msg_tx_err(lp))
+ printk(KERN_ERR
+ "%s: Tx error status=%04x err_status=%08x\n",
+ dev->name, status,
+ err_status);
+ if (err_status & 0x04000000)
+ lp->stats.tx_aborted_errors++;
+ if (err_status & 0x08000000)
+ lp->stats.tx_carrier_errors++;
+ if (err_status & 0x10000000)
+ lp->stats.tx_window_errors++;
#ifndef DO_DXSUFLO
- if (err_status & 0x40000000) {
- lp->stats.tx_fifo_errors++;
- /* Ackk! On FIFO errors the Tx unit is turned off! */
- /* Remove this verbosity later! */
- if (netif_msg_tx_err(lp))
- printk(KERN_ERR "%s: Tx FIFO error! CSR0=%4.4x\n",
- dev->name, csr0);
- must_restart = 1;
- }
+ if (err_status & 0x40000000) {
+ lp->stats.tx_fifo_errors++;
+ /* Ackk! On FIFO errors the Tx unit is turned off! */
+ /* Remove this verbosity later! */
+ if (netif_msg_tx_err(lp))
+ printk(KERN_ERR
+ "%s: Tx FIFO error! CSR0=%4.4x\n",
+ dev->name, csr0);
+ must_restart = 1;
+ }
#else
- if (err_status & 0x40000000) {
- lp->stats.tx_fifo_errors++;
- if (! lp->dxsuflo) { /* If controller doesn't recover ... */
- /* Ackk! On FIFO errors the Tx unit is turned off! */
- /* Remove this verbosity later! */
- if (netif_msg_tx_err(lp))
- printk(KERN_ERR "%s: Tx FIFO error! CSR0=%4.4x\n",
- dev->name, csr0);
- must_restart = 1;
- }
- }
+ if (err_status & 0x40000000) {
+ lp->stats.tx_fifo_errors++;
+ if (!lp->dxsuflo) { /* If controller doesn't recover ... */
+ /* Ackk! On FIFO errors the Tx unit is turned off! */
+ /* Remove this verbosity later! */
+					if (netif_msg_tx_err(lp))
+						printk(KERN_ERR
+						       "%s: Tx FIFO error! CSR0=%4.4x\n",
+						       dev->name, csr0);
+ must_restart = 1;
+ }
+ }
#endif
- } else {
- if (status & 0x1800)
- lp->stats.collisions++;
- lp->stats.tx_packets++;
+ } else {
+ if (status & 0x1800)
+ lp->stats.collisions++;
+ lp->stats.tx_packets++;
+ }
+
+ /* We must free the original skb */
+ if (lp->tx_skbuff[entry]) {
+ pci_unmap_single(lp->pci_dev,
+ lp->tx_dma_addr[entry],
+						 lp->tx_skbuff[entry]->len,
+						 PCI_DMA_TODEVICE);
+ dev_kfree_skb_irq(lp->tx_skbuff[entry]);
+ lp->tx_skbuff[entry] = NULL;
+ lp->tx_dma_addr[entry] = 0;
+ }
+ dirty_tx++;
+ }
+
+ delta =
+ (lp->cur_tx - dirty_tx) & (lp->tx_mod_mask +
+ lp->tx_ring_size);
+ if (delta > lp->tx_ring_size) {
+ if (netif_msg_drv(lp))
+ printk(KERN_ERR
+ "%s: out-of-sync dirty pointer, %d vs. %d, full=%d.\n",
+ dev->name, dirty_tx, lp->cur_tx,
+ lp->tx_full);
+ dirty_tx += lp->tx_ring_size;
+ delta -= lp->tx_ring_size;
+ }
+
+ if (lp->tx_full &&
+ netif_queue_stopped(dev) &&
+ delta < lp->tx_ring_size - 2) {
+ /* The ring is no longer full, clear tbusy. */
+ lp->tx_full = 0;
+ netif_wake_queue(dev);
+ }
+ lp->dirty_tx = dirty_tx;
+ }
+
+ /* Log misc errors. */
+ if (csr0 & 0x4000)
+ lp->stats.tx_errors++; /* Tx babble. */
+ if (csr0 & 0x1000) {
+ /*
+ * this happens when our receive ring is full. This shouldn't
+ * be a problem as we will see normal rx interrupts for the frames
+ * in the receive ring. But there are some PCI chipsets (I can
+		 * reproduce this on SP3G with the Intel Saturn chipset) which
+		 * sometimes have problems and will fill up the receive ring with
+ * error descriptors. In this situation we don't get a rx
+ * interrupt, but a missed frame interrupt sooner or later.
+ * So we try to clean up our receive ring here.
+ */
+ pcnet32_rx(dev);
+ lp->stats.rx_errors++; /* Missed a Rx frame. */
+ }
+ if (csr0 & 0x0800) {
+ if (netif_msg_drv(lp))
+ printk(KERN_ERR
+ "%s: Bus master arbitration failure, status %4.4x.\n",
+ dev->name, csr0);
+ /* unlike for the lance, there is no restart needed */
}
- /* We must free the original skb */
- if (lp->tx_skbuff[entry]) {
- pci_unmap_single(lp->pci_dev, lp->tx_dma_addr[entry],
- lp->tx_skbuff[entry]->len, PCI_DMA_TODEVICE);
- dev_kfree_skb_irq(lp->tx_skbuff[entry]);
- lp->tx_skbuff[entry] = NULL;
- lp->tx_dma_addr[entry] = 0;
+ if (must_restart) {
+ /* reset the chip to clear the error condition, then restart */
+ lp->a.reset(ioaddr);
+ lp->a.write_csr(ioaddr, 4, 0x0915);
+ pcnet32_restart(dev, 0x0002);
+ netif_wake_queue(dev);
}
- dirty_tx++;
- }
-
- delta = (lp->cur_tx - dirty_tx) & (lp->tx_mod_mask + lp->tx_ring_size);
- if (delta > lp->tx_ring_size) {
- if (netif_msg_drv(lp))
- printk(KERN_ERR "%s: out-of-sync dirty pointer, %d vs. %d, full=%d.\n",
- dev->name, dirty_tx, lp->cur_tx, lp->tx_full);
- dirty_tx += lp->tx_ring_size;
- delta -= lp->tx_ring_size;
- }
-
- if (lp->tx_full &&
- netif_queue_stopped(dev) &&
- delta < lp->tx_ring_size - 2) {
- /* The ring is no longer full, clear tbusy. */
- lp->tx_full = 0;
- netif_wake_queue (dev);
- }
- lp->dirty_tx = dirty_tx;
- }
-
- /* Log misc errors. */
- if (csr0 & 0x4000) lp->stats.tx_errors++; /* Tx babble. */
- if (csr0 & 0x1000) {
- /*
- * this happens when our receive ring is full. This shouldn't
- * be a problem as we will see normal rx interrupts for the frames
- * in the receive ring. But there are some PCI chipsets (I can
- * reproduce this on SP3G with Intel saturn chipset) which have
- * sometimes problems and will fill up the receive ring with
- * error descriptors. In this situation we don't get a rx
- * interrupt, but a missed frame interrupt sooner or later.
- * So we try to clean up our receive ring here.
- */
- pcnet32_rx(dev);
- lp->stats.rx_errors++; /* Missed a Rx frame. */
- }
- if (csr0 & 0x0800) {
- if (netif_msg_drv(lp))
- printk(KERN_ERR "%s: Bus master arbitration failure, status %4.4x.\n",
- dev->name, csr0);
- /* unlike for the lance, there is no restart needed */
- }
-
- if (must_restart) {
- /* reset the chip to clear the error condition, then restart */
- lp->a.reset(ioaddr);
- lp->a.write_csr(ioaddr, 4, 0x0915);
- pcnet32_restart(dev, 0x0002);
- netif_wake_queue(dev);
- }
- }
-
- /* Set interrupt enable. */
- lp->a.write_csr (ioaddr, 0, 0x0040);
- lp->a.write_rap (ioaddr,rap);
-
- if (netif_msg_intr(lp))
- printk(KERN_DEBUG "%s: exiting interrupt, csr0=%#4.4x.\n",
- dev->name, lp->a.read_csr (ioaddr, 0));
-
- spin_unlock(&lp->lock);
-
- return IRQ_HANDLED;
+ }
+
+ /* Set interrupt enable. */
+ lp->a.write_csr(ioaddr, 0, 0x0040);
+ lp->a.write_rap(ioaddr, rap);
+
+ if (netif_msg_intr(lp))
+ printk(KERN_DEBUG "%s: exiting interrupt, csr0=%#4.4x.\n",
+ dev->name, lp->a.read_csr(ioaddr, 0));
+
+ spin_unlock(&lp->lock);
+
+ return IRQ_HANDLED;
}
-static int
-pcnet32_rx(struct net_device *dev)
+static int pcnet32_rx(struct net_device *dev)
{
- struct pcnet32_private *lp = dev->priv;
- int entry = lp->cur_rx & lp->rx_mod_mask;
- int boguscnt = lp->rx_ring_size / 2;
-
- /* If we own the next entry, it's a new packet. Send it up. */
- while ((short)le16_to_cpu(lp->rx_ring[entry].status) >= 0) {
- int status = (short)le16_to_cpu(lp->rx_ring[entry].status) >> 8;
-
- if (status != 0x03) { /* There was an error. */
- /*
- * There is a tricky error noted by John Murphy,
- * <murf@perftech.com> to Russ Nelson: Even with full-sized
- * buffers it's possible for a jabber packet to use two
- * buffers, with only the last correctly noting the error.
- */
- if (status & 0x01) /* Only count a general error at the */
- lp->stats.rx_errors++; /* end of a packet.*/
- if (status & 0x20) lp->stats.rx_frame_errors++;
- if (status & 0x10) lp->stats.rx_over_errors++;
- if (status & 0x08) lp->stats.rx_crc_errors++;
- if (status & 0x04) lp->stats.rx_fifo_errors++;
- lp->rx_ring[entry].status &= le16_to_cpu(0x03ff);
- } else {
- /* Malloc up new buffer, compatible with net-2e. */
- short pkt_len = (le32_to_cpu(lp->rx_ring[entry].msg_length) & 0xfff)-4;
- struct sk_buff *skb;
-
- /* Discard oversize frames. */
- if (unlikely(pkt_len > PKT_BUF_SZ - 2)) {
- if (netif_msg_drv(lp))
- printk(KERN_ERR "%s: Impossible packet size %d!\n",
- dev->name, pkt_len);
- lp->stats.rx_errors++;
- } else if (pkt_len < 60) {
- if (netif_msg_rx_err(lp))
- printk(KERN_ERR "%s: Runt packet!\n", dev->name);
- lp->stats.rx_errors++;
- } else {
- int rx_in_place = 0;
-
- if (pkt_len > rx_copybreak) {
- struct sk_buff *newskb;
-
- if ((newskb = dev_alloc_skb(PKT_BUF_SZ))) {
- skb_reserve (newskb, 2);
- skb = lp->rx_skbuff[entry];
- pci_unmap_single(lp->pci_dev, lp->rx_dma_addr[entry],
- PKT_BUF_SZ-2, PCI_DMA_FROMDEVICE);
- skb_put (skb, pkt_len);
- lp->rx_skbuff[entry] = newskb;
- newskb->dev = dev;
- lp->rx_dma_addr[entry] =
- pci_map_single(lp->pci_dev, newskb->data,
- PKT_BUF_SZ-2, PCI_DMA_FROMDEVICE);
- lp->rx_ring[entry].base = le32_to_cpu(lp->rx_dma_addr[entry]);
- rx_in_place = 1;
- } else
- skb = NULL;
+ struct pcnet32_private *lp = dev->priv;
+ int entry = lp->cur_rx & lp->rx_mod_mask;
+ int boguscnt = lp->rx_ring_size / 2;
+
+ /* If we own the next entry, it's a new packet. Send it up. */
+ while ((short)le16_to_cpu(lp->rx_ring[entry].status) >= 0) {
+ int status = (short)le16_to_cpu(lp->rx_ring[entry].status) >> 8;
+
+ if (status != 0x03) { /* There was an error. */
+ /*
+ * There is a tricky error noted by John Murphy,
+ * <murf@perftech.com> to Russ Nelson: Even with full-sized
+ * buffers it's possible for a jabber packet to use two
+ * buffers, with only the last correctly noting the error.
+ */
+ if (status & 0x01) /* Only count a general error at the */
+ lp->stats.rx_errors++; /* end of a packet. */
+ if (status & 0x20)
+ lp->stats.rx_frame_errors++;
+ if (status & 0x10)
+ lp->stats.rx_over_errors++;
+ if (status & 0x08)
+ lp->stats.rx_crc_errors++;
+ if (status & 0x04)
+ lp->stats.rx_fifo_errors++;
+ lp->rx_ring[entry].status &= le16_to_cpu(0x03ff);
} else {
- skb = dev_alloc_skb(pkt_len+2);
- }
-
- if (skb == NULL) {
- int i;
- if (netif_msg_drv(lp))
- printk(KERN_ERR "%s: Memory squeeze, deferring packet.\n",
- dev->name);
- for (i = 0; i < lp->rx_ring_size; i++)
- if ((short)le16_to_cpu(lp->rx_ring[(entry+i)
- & lp->rx_mod_mask].status) < 0)
- break;
-
- if (i > lp->rx_ring_size -2) {
- lp->stats.rx_dropped++;
- lp->rx_ring[entry].status |= le16_to_cpu(0x8000);
- wmb(); /* Make sure adapter sees owner change */
- lp->cur_rx++;
- }
- break;
- }
- skb->dev = dev;
- if (!rx_in_place) {
- skb_reserve(skb,2); /* 16 byte align */
- skb_put(skb,pkt_len); /* Make room */
- pci_dma_sync_single_for_cpu(lp->pci_dev,
- lp->rx_dma_addr[entry],
- PKT_BUF_SZ-2,
- PCI_DMA_FROMDEVICE);
- eth_copy_and_sum(skb,
- (unsigned char *)(lp->rx_skbuff[entry]->data),
- pkt_len,0);
- pci_dma_sync_single_for_device(lp->pci_dev,
- lp->rx_dma_addr[entry],
- PKT_BUF_SZ-2,
- PCI_DMA_FROMDEVICE);
+ /* Malloc up new buffer, compatible with net-2e. */
+			short pkt_len =
+			    (le32_to_cpu(lp->rx_ring[entry].msg_length) & 0xfff) - 4;
+ struct sk_buff *skb;
+
+ /* Discard oversize frames. */
+ if (unlikely(pkt_len > PKT_BUF_SZ - 2)) {
+ if (netif_msg_drv(lp))
+ printk(KERN_ERR
+ "%s: Impossible packet size %d!\n",
+ dev->name, pkt_len);
+ lp->stats.rx_errors++;
+ } else if (pkt_len < 60) {
+ if (netif_msg_rx_err(lp))
+ printk(KERN_ERR "%s: Runt packet!\n",
+ dev->name);
+ lp->stats.rx_errors++;
+ } else {
+ int rx_in_place = 0;
+
+ if (pkt_len > rx_copybreak) {
+ struct sk_buff *newskb;
+
+				if ((newskb = dev_alloc_skb(PKT_BUF_SZ))) {
+					skb_reserve(newskb, 2);
+					skb = lp->rx_skbuff[entry];
+					pci_unmap_single(lp->pci_dev,
+							 lp->rx_dma_addr[entry],
+							 PKT_BUF_SZ - 2,
+							 PCI_DMA_FROMDEVICE);
+ skb_put(skb, pkt_len);
+ lp->rx_skbuff[entry] = newskb;
+ newskb->dev = dev;
+					lp->rx_dma_addr[entry] =
+					    pci_map_single(lp->pci_dev,
+							   newskb->data,
+							   PKT_BUF_SZ - 2,
+							   PCI_DMA_FROMDEVICE);
+					lp->rx_ring[entry].base =
+					    le32_to_cpu(lp->rx_dma_addr[entry]);
+ rx_in_place = 1;
+ } else
+ skb = NULL;
+ } else {
+ skb = dev_alloc_skb(pkt_len + 2);
+ }
+
+ if (skb == NULL) {
+ int i;
+ if (netif_msg_drv(lp))
+ printk(KERN_ERR
+ "%s: Memory squeeze, deferring packet.\n",
+ dev->name);
+ for (i = 0; i < lp->rx_ring_size; i++)
+					if ((short)le16_to_cpu(lp->rx_ring[(entry + i) &
+							lp->rx_mod_mask].status) < 0)
+ break;
+
+ if (i > lp->rx_ring_size - 2) {
+ lp->stats.rx_dropped++;
+ lp->rx_ring[entry].status |=
+ le16_to_cpu(0x8000);
+ wmb(); /* Make sure adapter sees owner change */
+ lp->cur_rx++;
+ }
+ break;
+ }
+ skb->dev = dev;
+ if (!rx_in_place) {
+ skb_reserve(skb, 2); /* 16 byte align */
+ skb_put(skb, pkt_len); /* Make room */
+				pci_dma_sync_single_for_cpu(lp->pci_dev,
+							    lp->rx_dma_addr[entry],
+							    PKT_BUF_SZ - 2,
+							    PCI_DMA_FROMDEVICE);
+				eth_copy_and_sum(skb,
+						 (unsigned char *)(lp->rx_skbuff[entry]->data),
+						 pkt_len, 0);
+				pci_dma_sync_single_for_device(lp->pci_dev,
+							       lp->rx_dma_addr[entry],
+							       PKT_BUF_SZ - 2,
+							       PCI_DMA_FROMDEVICE);
+ }
+ lp->stats.rx_bytes += skb->len;
+ skb->protocol = eth_type_trans(skb, dev);
+ netif_rx(skb);
+ dev->last_rx = jiffies;
+ lp->stats.rx_packets++;
+ }
}
- lp->stats.rx_bytes += skb->len;
- skb->protocol=eth_type_trans(skb,dev);
- netif_rx(skb);
- dev->last_rx = jiffies;
- lp->stats.rx_packets++;
- }
+ /*
+ * The docs say that the buffer length isn't touched, but Andrew Boyd
+ * of QNX reports that some revs of the 79C965 clear it.
+ */
+ lp->rx_ring[entry].buf_length = le16_to_cpu(2 - PKT_BUF_SZ);
+ wmb(); /* Make sure owner changes after all others are visible */
+ lp->rx_ring[entry].status |= le16_to_cpu(0x8000);
+ entry = (++lp->cur_rx) & lp->rx_mod_mask;
+ if (--boguscnt <= 0)
+ break; /* don't stay in loop forever */
}
- /*
- * The docs say that the buffer length isn't touched, but Andrew Boyd
- * of QNX reports that some revs of the 79C965 clear it.
- */
- lp->rx_ring[entry].buf_length = le16_to_cpu(2-PKT_BUF_SZ);
- wmb(); /* Make sure owner changes after all others are visible */
- lp->rx_ring[entry].status |= le16_to_cpu(0x8000);
- entry = (++lp->cur_rx) & lp->rx_mod_mask;
- if (--boguscnt <= 0) break; /* don't stay in loop forever */
- }
-
- return 0;
+
+ return 0;
}
-static int
-pcnet32_close(struct net_device *dev)
+static int pcnet32_close(struct net_device *dev)
{
- unsigned long ioaddr = dev->base_addr;
- struct pcnet32_private *lp = dev->priv;
- int i;
- unsigned long flags;
+ unsigned long ioaddr = dev->base_addr;
+ struct pcnet32_private *lp = dev->priv;
+ int i;
+ unsigned long flags;
- del_timer_sync(&lp->watchdog_timer);
+ del_timer_sync(&lp->watchdog_timer);
- netif_stop_queue(dev);
+ netif_stop_queue(dev);
- spin_lock_irqsave(&lp->lock, flags);
+ spin_lock_irqsave(&lp->lock, flags);
- lp->stats.rx_missed_errors = lp->a.read_csr (ioaddr, 112);
+ lp->stats.rx_missed_errors = lp->a.read_csr(ioaddr, 112);
- if (netif_msg_ifdown(lp))
- printk(KERN_DEBUG "%s: Shutting down ethercard, status was %2.2x.\n",
- dev->name, lp->a.read_csr (ioaddr, 0));
+ if (netif_msg_ifdown(lp))
+ printk(KERN_DEBUG
+ "%s: Shutting down ethercard, status was %2.2x.\n",
+ dev->name, lp->a.read_csr(ioaddr, 0));
- /* We stop the PCNET32 here -- it occasionally polls memory if we don't. */
- lp->a.write_csr (ioaddr, 0, 0x0004);
+ /* We stop the PCNET32 here -- it occasionally polls memory if we don't. */
+ lp->a.write_csr(ioaddr, 0, 0x0004);
- /*
- * Switch back to 16bit mode to avoid problems with dumb
- * DOS packet driver after a warm reboot
- */
- lp->a.write_bcr (ioaddr, 20, 4);
+ /*
+ * Switch back to 16bit mode to avoid problems with dumb
+ * DOS packet driver after a warm reboot
+ */
+ lp->a.write_bcr(ioaddr, 20, 4);
- spin_unlock_irqrestore(&lp->lock, flags);
+ spin_unlock_irqrestore(&lp->lock, flags);
- free_irq(dev->irq, dev);
+ free_irq(dev->irq, dev);
- spin_lock_irqsave(&lp->lock, flags);
+ spin_lock_irqsave(&lp->lock, flags);
- /* free all allocated skbuffs */
- for (i = 0; i < lp->rx_ring_size; i++) {
- lp->rx_ring[i].status = 0;
- wmb(); /* Make sure adapter sees owner change */
- if (lp->rx_skbuff[i]) {
- pci_unmap_single(lp->pci_dev, lp->rx_dma_addr[i], PKT_BUF_SZ-2,
- PCI_DMA_FROMDEVICE);
- dev_kfree_skb(lp->rx_skbuff[i]);
+ /* free all allocated skbuffs */
+ for (i = 0; i < lp->rx_ring_size; i++) {
+ lp->rx_ring[i].status = 0;
+ wmb(); /* Make sure adapter sees owner change */
+ if (lp->rx_skbuff[i]) {
+ pci_unmap_single(lp->pci_dev, lp->rx_dma_addr[i],
+ PKT_BUF_SZ - 2, PCI_DMA_FROMDEVICE);
+ dev_kfree_skb(lp->rx_skbuff[i]);
+ }
+ lp->rx_skbuff[i] = NULL;
+ lp->rx_dma_addr[i] = 0;
}
- lp->rx_skbuff[i] = NULL;
- lp->rx_dma_addr[i] = 0;
- }
- for (i = 0; i < lp->tx_ring_size; i++) {
- lp->tx_ring[i].status = 0; /* CPU owns buffer */
- wmb(); /* Make sure adapter sees owner change */
- if (lp->tx_skbuff[i]) {
- pci_unmap_single(lp->pci_dev, lp->tx_dma_addr[i],
- lp->tx_skbuff[i]->len, PCI_DMA_TODEVICE);
- dev_kfree_skb(lp->tx_skbuff[i]);
+ for (i = 0; i < lp->tx_ring_size; i++) {
+ lp->tx_ring[i].status = 0; /* CPU owns buffer */
+ wmb(); /* Make sure adapter sees owner change */
+ if (lp->tx_skbuff[i]) {
+ pci_unmap_single(lp->pci_dev, lp->tx_dma_addr[i],
+ lp->tx_skbuff[i]->len,
+ PCI_DMA_TODEVICE);
+ dev_kfree_skb(lp->tx_skbuff[i]);
+ }
+ lp->tx_skbuff[i] = NULL;
+ lp->tx_dma_addr[i] = 0;
}
- lp->tx_skbuff[i] = NULL;
- lp->tx_dma_addr[i] = 0;
- }
- spin_unlock_irqrestore(&lp->lock, flags);
+ spin_unlock_irqrestore(&lp->lock, flags);
- return 0;
+ return 0;
}
-static struct net_device_stats *
-pcnet32_get_stats(struct net_device *dev)
+static struct net_device_stats *pcnet32_get_stats(struct net_device *dev)
{
- struct pcnet32_private *lp = dev->priv;
- unsigned long ioaddr = dev->base_addr;
- u16 saved_addr;
- unsigned long flags;
-
- spin_lock_irqsave(&lp->lock, flags);
- saved_addr = lp->a.read_rap(ioaddr);
- lp->stats.rx_missed_errors = lp->a.read_csr (ioaddr, 112);
- lp->a.write_rap(ioaddr, saved_addr);
- spin_unlock_irqrestore(&lp->lock, flags);
-
- return &lp->stats;
+ struct pcnet32_private *lp = dev->priv;
+ unsigned long ioaddr = dev->base_addr;
+ u16 saved_addr;
+ unsigned long flags;
+
+ spin_lock_irqsave(&lp->lock, flags);
+ saved_addr = lp->a.read_rap(ioaddr);
+ lp->stats.rx_missed_errors = lp->a.read_csr(ioaddr, 112);
+ lp->a.write_rap(ioaddr, saved_addr);
+ spin_unlock_irqrestore(&lp->lock, flags);
+
+ return &lp->stats;
}
/* taken from the sunlance driver, which it took from the depca driver */
-static void pcnet32_load_multicast (struct net_device *dev)
+static void pcnet32_load_multicast(struct net_device *dev)
{
- struct pcnet32_private *lp = dev->priv;
- volatile struct pcnet32_init_block *ib = &lp->init_block;
- volatile u16 *mcast_table = (u16 *)&ib->filter;
- struct dev_mc_list *dmi=dev->mc_list;
- char *addrs;
- int i;
- u32 crc;
-
- /* set all multicast bits */
- if (dev->flags & IFF_ALLMULTI) {
- ib->filter[0] = 0xffffffff;
- ib->filter[1] = 0xffffffff;
+ struct pcnet32_private *lp = dev->priv;
+ volatile struct pcnet32_init_block *ib = &lp->init_block;
+	volatile u16 *mcast_table = (u16 *)&ib->filter;
+ struct dev_mc_list *dmi = dev->mc_list;
+ char *addrs;
+ int i;
+ u32 crc;
+
+ /* set all multicast bits */
+ if (dev->flags & IFF_ALLMULTI) {
+ ib->filter[0] = 0xffffffff;
+ ib->filter[1] = 0xffffffff;
+ return;
+ }
+ /* clear the multicast filter */
+ ib->filter[0] = 0;
+ ib->filter[1] = 0;
+
+ /* Add addresses */
+ for (i = 0; i < dev->mc_count; i++) {
+ addrs = dmi->dmi_addr;
+ dmi = dmi->next;
+
+ /* multicast address? */
+ if (!(*addrs & 1))
+ continue;
+
+ crc = ether_crc_le(6, addrs);
+ crc = crc >> 26;
+ mcast_table[crc >> 4] =
+ le16_to_cpu(le16_to_cpu(mcast_table[crc >> 4]) |
+ (1 << (crc & 0xf)));
+ }
return;
- }
- /* clear the multicast filter */
- ib->filter[0] = 0;
- ib->filter[1] = 0;
-
- /* Add addresses */
- for (i = 0; i < dev->mc_count; i++) {
- addrs = dmi->dmi_addr;
- dmi = dmi->next;
-
- /* multicast address? */
- if (!(*addrs & 1))
- continue;
-
- crc = ether_crc_le(6, addrs);
- crc = crc >> 26;
- mcast_table [crc >> 4] = le16_to_cpu(
- le16_to_cpu(mcast_table [crc >> 4]) | (1 << (crc & 0xf)));
- }
- return;
}
-
/*
* Set or clear the multicast filter for this adaptor.
*/
static void pcnet32_set_multicast_list(struct net_device *dev)
{
- unsigned long ioaddr = dev->base_addr, flags;
- struct pcnet32_private *lp = dev->priv;
-
- spin_lock_irqsave(&lp->lock, flags);
- if (dev->flags&IFF_PROMISC) {
- /* Log any net taps. */
- if (netif_msg_hw(lp))
- printk(KERN_INFO "%s: Promiscuous mode enabled.\n", dev->name);
- lp->init_block.mode = le16_to_cpu(0x8000 | (lp->options & PCNET32_PORT_PORTSEL) << 7);
- } else {
- lp->init_block.mode = le16_to_cpu((lp->options & PCNET32_PORT_PORTSEL) << 7);
- pcnet32_load_multicast (dev);
- }
-
- lp->a.write_csr (ioaddr, 0, 0x0004); /* Temporarily stop the lance. */
- pcnet32_restart(dev, 0x0042); /* Resume normal operation */
- netif_wake_queue(dev);
-
- spin_unlock_irqrestore(&lp->lock, flags);
+ unsigned long ioaddr = dev->base_addr, flags;
+ struct pcnet32_private *lp = dev->priv;
+
+ spin_lock_irqsave(&lp->lock, flags);
+ if (dev->flags & IFF_PROMISC) {
+ /* Log any net taps. */
+ if (netif_msg_hw(lp))
+ printk(KERN_INFO "%s: Promiscuous mode enabled.\n",
+ dev->name);
+		lp->init_block.mode =
+		    le16_to_cpu(0x8000 |
+				(lp->options & PCNET32_PORT_PORTSEL) << 7);
+ } else {
+ lp->init_block.mode =
+ le16_to_cpu((lp->options & PCNET32_PORT_PORTSEL) << 7);
+ pcnet32_load_multicast(dev);
+ }
+
+ lp->a.write_csr(ioaddr, 0, 0x0004); /* Temporarily stop the lance. */
+ pcnet32_restart(dev, 0x0042); /* Resume normal operation */
+ netif_wake_queue(dev);
+
+ spin_unlock_irqrestore(&lp->lock, flags);
}
/* This routine assumes that the lp->lock is held */
static int mdio_read(struct net_device *dev, int phy_id, int reg_num)
{
- struct pcnet32_private *lp = dev->priv;
- unsigned long ioaddr = dev->base_addr;
- u16 val_out;
+ struct pcnet32_private *lp = dev->priv;
+ unsigned long ioaddr = dev->base_addr;
+ u16 val_out;
- if (!lp->mii)
- return 0;
+ if (!lp->mii)
+ return 0;
- lp->a.write_bcr(ioaddr, 33, ((phy_id & 0x1f) << 5) | (reg_num & 0x1f));
- val_out = lp->a.read_bcr(ioaddr, 34);
+ lp->a.write_bcr(ioaddr, 33, ((phy_id & 0x1f) << 5) | (reg_num & 0x1f));
+ val_out = lp->a.read_bcr(ioaddr, 34);
- return val_out;
+ return val_out;
}
/* This routine assumes that the lp->lock is held */
static void mdio_write(struct net_device *dev, int phy_id, int reg_num, int val)
{
- struct pcnet32_private *lp = dev->priv;
- unsigned long ioaddr = dev->base_addr;
+ struct pcnet32_private *lp = dev->priv;
+ unsigned long ioaddr = dev->base_addr;
- if (!lp->mii)
- return;
+ if (!lp->mii)
+ return;
- lp->a.write_bcr(ioaddr, 33, ((phy_id & 0x1f) << 5) | (reg_num & 0x1f));
- lp->a.write_bcr(ioaddr, 34, val);
+ lp->a.write_bcr(ioaddr, 33, ((phy_id & 0x1f) << 5) | (reg_num & 0x1f));
+ lp->a.write_bcr(ioaddr, 34, val);
}
static int pcnet32_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
- struct pcnet32_private *lp = dev->priv;
- int rc;
- unsigned long flags;
+ struct pcnet32_private *lp = dev->priv;
+ int rc;
+ unsigned long flags;
+
+ /* SIOC[GS]MIIxxx ioctls */
+ if (lp->mii) {
+ spin_lock_irqsave(&lp->lock, flags);
+ rc = generic_mii_ioctl(&lp->mii_if, if_mii(rq), cmd, NULL);
+ spin_unlock_irqrestore(&lp->lock, flags);
+ } else {
+ rc = -EOPNOTSUPP;
+ }
+
+ return rc;
+}
+
+static int pcnet32_check_otherphy(struct net_device *dev)
+{
+ struct pcnet32_private *lp = dev->priv;
+ struct mii_if_info mii = lp->mii_if;
+ u16 bmcr;
+ int i;
- /* SIOC[GS]MIIxxx ioctls */
- if (lp->mii) {
- spin_lock_irqsave(&lp->lock, flags);
- rc = generic_mii_ioctl(&lp->mii_if, if_mii(rq), cmd, NULL);
- spin_unlock_irqrestore(&lp->lock, flags);
- } else {
- rc = -EOPNOTSUPP;
- }
+ for (i = 0; i < PCNET32_MAX_PHYS; i++) {
+ if (i == lp->mii_if.phy_id)
+ continue; /* skip active phy */
+ if (lp->phymask & (1 << i)) {
+ mii.phy_id = i;
+ if (mii_link_ok(&mii)) {
+ /* found PHY with active link */
+ if (netif_msg_link(lp))
+ printk(KERN_INFO
+ "%s: Using PHY number %d.\n",
+ dev->name, i);
+
+ /* isolate inactive phy */
+ bmcr =
+ mdio_read(dev, lp->mii_if.phy_id, MII_BMCR);
+ mdio_write(dev, lp->mii_if.phy_id, MII_BMCR,
+ bmcr | BMCR_ISOLATE);
+
+ /* de-isolate new phy */
+ bmcr = mdio_read(dev, i, MII_BMCR);
+ mdio_write(dev, i, MII_BMCR,
+ bmcr & ~BMCR_ISOLATE);
+
+ /* set new phy address */
+ lp->mii_if.phy_id = i;
+ return 1;
+ }
+ }
+ }
+ return 0;
+}
+
+/*
+ * Show the status of the media. Similar to mii_check_media, but it
+ * correctly shows the link speed for all (tested) pcnet32 variants.
+ * Devices with no mii just report link state without speed.
+ *
+ * Caller is assumed to hold and release the lp->lock.
+ */
- return rc;
+static void pcnet32_check_media(struct net_device *dev, int verbose)
+{
+ struct pcnet32_private *lp = dev->priv;
+ int curr_link;
+ int prev_link = netif_carrier_ok(dev) ? 1 : 0;
+ u32 bcr9;
+
+ if (lp->mii) {
+ curr_link = mii_link_ok(&lp->mii_if);
+ } else {
+ ulong ioaddr = dev->base_addr; /* card base I/O address */
+ curr_link = (lp->a.read_bcr(ioaddr, 4) != 0xc0);
+ }
+ if (!curr_link) {
+ if (prev_link || verbose) {
+ netif_carrier_off(dev);
+ if (netif_msg_link(lp))
+ printk(KERN_INFO "%s: link down\n", dev->name);
+ }
+ if (lp->phycount > 1) {
+ curr_link = pcnet32_check_otherphy(dev);
+ prev_link = 0;
+ }
+ } else if (verbose || !prev_link) {
+ netif_carrier_on(dev);
+ if (lp->mii) {
+ if (netif_msg_link(lp)) {
+ struct ethtool_cmd ecmd;
+ mii_ethtool_gset(&lp->mii_if, &ecmd);
+ printk(KERN_INFO
+ "%s: link up, %sMbps, %s-duplex\n",
+ dev->name,
+ (ecmd.speed == SPEED_100) ? "100" : "10",
+				       (ecmd.duplex == DUPLEX_FULL) ?
+				       "full" : "half");
+ }
+ bcr9 = lp->a.read_bcr(dev->base_addr, 9);
+ if ((bcr9 & (1 << 0)) != lp->mii_if.full_duplex) {
+ if (lp->mii_if.full_duplex)
+ bcr9 |= (1 << 0);
+ else
+ bcr9 &= ~(1 << 0);
+ lp->a.write_bcr(dev->base_addr, 9, bcr9);
+ }
+ } else {
+ if (netif_msg_link(lp))
+ printk(KERN_INFO "%s: link up\n", dev->name);
+ }
+ }
}
+/*
+ * Check for loss of link and link establishment.
+ * Cannot use mii_check_media because it does nothing if the mode is forced.
+ */
+
static void pcnet32_watchdog(struct net_device *dev)
{
- struct pcnet32_private *lp = dev->priv;
- unsigned long flags;
+ struct pcnet32_private *lp = dev->priv;
+ unsigned long flags;
- /* Print the link status if it has changed */
- if (lp->mii) {
+ /* Print the link status if it has changed */
spin_lock_irqsave(&lp->lock, flags);
- mii_check_media (&lp->mii_if, netif_msg_link(lp), 0);
+ pcnet32_check_media(dev, 0);
spin_unlock_irqrestore(&lp->lock, flags);
- }
- mod_timer (&(lp->watchdog_timer), PCNET32_WATCHDOG_TIMEOUT);
+ mod_timer(&(lp->watchdog_timer), PCNET32_WATCHDOG_TIMEOUT);
}
static void __devexit pcnet32_remove_one(struct pci_dev *pdev)
{
- struct net_device *dev = pci_get_drvdata(pdev);
-
- if (dev) {
- struct pcnet32_private *lp = dev->priv;
-
- unregister_netdev(dev);
- pcnet32_free_ring(dev);
- release_region(dev->base_addr, PCNET32_TOTAL_SIZE);
- pci_free_consistent(lp->pci_dev, sizeof(*lp), lp, lp->dma_addr);
- free_netdev(dev);
- pci_disable_device(pdev);
- pci_set_drvdata(pdev, NULL);
- }
+ struct net_device *dev = pci_get_drvdata(pdev);
+
+ if (dev) {
+ struct pcnet32_private *lp = dev->priv;
+
+ unregister_netdev(dev);
+ pcnet32_free_ring(dev);
+ release_region(dev->base_addr, PCNET32_TOTAL_SIZE);
+ pci_free_consistent(lp->pci_dev, sizeof(*lp), lp, lp->dma_addr);
+ free_netdev(dev);
+ pci_disable_device(pdev);
+ pci_set_drvdata(pdev, NULL);
+ }
}
static struct pci_driver pcnet32_driver = {
- .name = DRV_NAME,
- .probe = pcnet32_probe_pci,
- .remove = __devexit_p(pcnet32_remove_one),
- .id_table = pcnet32_pci_tbl,
+ .name = DRV_NAME,
+ .probe = pcnet32_probe_pci,
+ .remove = __devexit_p(pcnet32_remove_one),
+ .id_table = pcnet32_pci_tbl,
};
/* An additional parameter that may be passed in... */
@@ -2477,9 +2698,11 @@ static int pcnet32_have_pci;
module_param(debug, int, 0);
MODULE_PARM_DESC(debug, DRV_NAME " debug level");
module_param(max_interrupt_work, int, 0);
-MODULE_PARM_DESC(max_interrupt_work, DRV_NAME " maximum events handled per interrupt");
+MODULE_PARM_DESC(max_interrupt_work,
+ DRV_NAME " maximum events handled per interrupt");
module_param(rx_copybreak, int, 0);
-MODULE_PARM_DESC(rx_copybreak, DRV_NAME " copy breakpoint for copy-only-tiny-frames");
+MODULE_PARM_DESC(rx_copybreak,
+ DRV_NAME " copy breakpoint for copy-only-tiny-frames");
module_param(tx_start_pt, int, 0);
MODULE_PARM_DESC(tx_start_pt, DRV_NAME " transmit start point (0-3)");
module_param(pcnet32vlb, int, 0);
@@ -2490,7 +2713,9 @@ module_param_array(full_duplex, int, NULL, 0);
MODULE_PARM_DESC(full_duplex, DRV_NAME " full duplex setting(s) (1)");
/* Module Parameter for HomePNA cards added by Patrick Simmons, 2004 */
module_param_array(homepna, int, NULL, 0);
-MODULE_PARM_DESC(homepna, DRV_NAME " mode for 79C978 cards (1 for HomePNA, 0 for Ethernet, default Ethernet");
+MODULE_PARM_DESC(homepna,
+ DRV_NAME
+		 " mode for 79C978 cards (1 for HomePNA, 0 for Ethernet, default Ethernet)");
MODULE_AUTHOR("Thomas Bogendoerfer");
MODULE_DESCRIPTION("Driver for PCnet32 and PCnetPCI based ethercards");
@@ -2500,44 +2725,44 @@ MODULE_LICENSE("GPL");
static int __init pcnet32_init_module(void)
{
- printk(KERN_INFO "%s", version);
+ printk(KERN_INFO "%s", version);
- pcnet32_debug = netif_msg_init(debug, PCNET32_MSG_DEFAULT);
+ pcnet32_debug = netif_msg_init(debug, PCNET32_MSG_DEFAULT);
- if ((tx_start_pt >= 0) && (tx_start_pt <= 3))
- tx_start = tx_start_pt;
+ if ((tx_start_pt >= 0) && (tx_start_pt <= 3))
+ tx_start = tx_start_pt;
- /* find the PCI devices */
- if (!pci_module_init(&pcnet32_driver))
- pcnet32_have_pci = 1;
+ /* find the PCI devices */
+ if (!pci_module_init(&pcnet32_driver))
+ pcnet32_have_pci = 1;
- /* should we find any remaining VLbus devices ? */
- if (pcnet32vlb)
- pcnet32_probe_vlbus();
+ /* should we find any remaining VLbus devices ? */
+ if (pcnet32vlb)
+ pcnet32_probe_vlbus();
- if (cards_found && (pcnet32_debug & NETIF_MSG_PROBE))
- printk(KERN_INFO PFX "%d cards_found.\n", cards_found);
+ if (cards_found && (pcnet32_debug & NETIF_MSG_PROBE))
+ printk(KERN_INFO PFX "%d cards_found.\n", cards_found);
- return (pcnet32_have_pci + cards_found) ? 0 : -ENODEV;
+ return (pcnet32_have_pci + cards_found) ? 0 : -ENODEV;
}
static void __exit pcnet32_cleanup_module(void)
{
- struct net_device *next_dev;
-
- while (pcnet32_dev) {
- struct pcnet32_private *lp = pcnet32_dev->priv;
- next_dev = lp->next;
- unregister_netdev(pcnet32_dev);
- pcnet32_free_ring(pcnet32_dev);
- release_region(pcnet32_dev->base_addr, PCNET32_TOTAL_SIZE);
- pci_free_consistent(lp->pci_dev, sizeof(*lp), lp, lp->dma_addr);
- free_netdev(pcnet32_dev);
- pcnet32_dev = next_dev;
- }
+ struct net_device *next_dev;
+
+ while (pcnet32_dev) {
+ struct pcnet32_private *lp = pcnet32_dev->priv;
+ next_dev = lp->next;
+ unregister_netdev(pcnet32_dev);
+ pcnet32_free_ring(pcnet32_dev);
+ release_region(pcnet32_dev->base_addr, PCNET32_TOTAL_SIZE);
+ pci_free_consistent(lp->pci_dev, sizeof(*lp), lp, lp->dma_addr);
+ free_netdev(pcnet32_dev);
+ pcnet32_dev = next_dev;
+ }
- if (pcnet32_have_pci)
- pci_unregister_driver(&pcnet32_driver);
+ if (pcnet32_have_pci)
+ pci_unregister_driver(&pcnet32_driver);
}
module_init(pcnet32_init_module);
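
A quick aside on the logical-address filter that pcnet32_load_multicast() fills in above: the driver hashes each multicast MAC address with a little-endian CRC-32 and uses the top 6 bits of the result to pick one of 64 filter bits (four 16-bit words). The stand-alone sketch below reproduces that arithmetic in user space; crc32_le() here is a plain bit-by-bit reference implementation standing in for the kernel's ether_crc_le(), and the sample address is arbitrary.

	#include <stdint.h>
	#include <stdio.h>

	/* Little-endian CRC-32, initial value ~0, no final inversion --
	 * the same result ether_crc_le() returns in the driver. */
	static uint32_t crc32_le(const uint8_t *p, int len)
	{
		uint32_t crc = 0xffffffff;
		int i, bit;

		for (i = 0; i < len; i++) {
			crc ^= p[i];
			for (bit = 0; bit < 8; bit++)
				crc = (crc >> 1) ^ ((crc & 1) ? 0xedb88320 : 0);
		}
		return crc;
	}

	int main(void)
	{
		const uint8_t addr[6] = { 0x01, 0x00, 0x5e, 0x00, 0x00, 0x01 };
		uint16_t filter[4] = { 0, 0, 0, 0 };
		uint32_t crc = crc32_le(addr, 6) >> 26;	/* keep the top 6 bits */

		/* bits 5..4 select the 16-bit word, bits 3..0 the bit within it */
		filter[crc >> 4] |= 1 << (crc & 0xf);
		printf("bit %u -> filter[%u] = 0x%04x\n",
		       crc, crc >> 4, filter[crc >> 4]);
		return 0;
	}
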
diff --git a/drivers/net/skfp/fplustm.c b/drivers/net/skfp/fplustm.c
index a4b2b6975d6..0784f558ca9 100644
--- a/drivers/net/skfp/fplustm.c
+++ b/drivers/net/skfp/fplustm.c
@@ -549,12 +549,12 @@ void formac_tx_restart(struct s_smc *smc)
static void enable_formac(struct s_smc *smc)
{
/* set formac IMSK : 0 enables irq */
- outpw(FM_A(FM_IMSK1U),~mac_imsk1u) ;
- outpw(FM_A(FM_IMSK1L),~mac_imsk1l) ;
- outpw(FM_A(FM_IMSK2U),~mac_imsk2u) ;
- outpw(FM_A(FM_IMSK2L),~mac_imsk2l) ;
- outpw(FM_A(FM_IMSK3U),~mac_imsk3u) ;
- outpw(FM_A(FM_IMSK3L),~mac_imsk3l) ;
+ outpw(FM_A(FM_IMSK1U),(unsigned short)~mac_imsk1u);
+ outpw(FM_A(FM_IMSK1L),(unsigned short)~mac_imsk1l);
+ outpw(FM_A(FM_IMSK2U),(unsigned short)~mac_imsk2u);
+ outpw(FM_A(FM_IMSK2L),(unsigned short)~mac_imsk2l);
+ outpw(FM_A(FM_IMSK3U),(unsigned short)~mac_imsk3u);
+ outpw(FM_A(FM_IMSK3L),(unsigned short)~mac_imsk3l);
}
#if 0 /* Removed because the driver should use the ASICs TX complete IRQ. */
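
The fplustm.c hunk above only adds explicit (unsigned short) casts. The point is that the ~ operator promotes its operand to int, so ~mac_imsk1u carries set high bits that get truncated when handed to the 16-bit register write; the cast makes that truncation explicit (and, presumably, silences the corresponding compiler warning). A minimal illustration of the promotion, independent of the driver:

	#include <stdio.h>

	int main(void)
	{
		unsigned short mask = 0x0040;

		/* ~ promotes to int: all upper bits become 1. */
		printf("~mask as int          = 0x%08x\n", ~mask);
		/* The cast keeps only the low 16 bits, as the register write does. */
		printf("(unsigned short)~mask = 0x%04x\n", (unsigned short)~mask);
		return 0;
	}
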
diff --git a/drivers/net/skge.c b/drivers/net/skge.c
index 25e028b7ce4..4eda81d41b1 100644
--- a/drivers/net/skge.c
+++ b/drivers/net/skge.c
@@ -44,7 +44,7 @@
#include "skge.h"
#define DRV_NAME "skge"
-#define DRV_VERSION "1.3"
+#define DRV_VERSION "1.4"
#define PFX DRV_NAME " "
#define DEFAULT_TX_RING_SIZE 128
@@ -104,7 +104,6 @@ static const int txqaddr[] = { Q_XA1, Q_XA2 };
static const int rxqaddr[] = { Q_R1, Q_R2 };
static const u32 rxirqmask[] = { IS_R1_F, IS_R2_F };
static const u32 txirqmask[] = { IS_XA1_F, IS_XA2_F };
-static const u32 portirqmask[] = { IS_PORT_1, IS_PORT_2 };
static int skge_get_regs_len(struct net_device *dev)
{
@@ -728,19 +727,18 @@ static struct ethtool_ops skge_ethtool_ops = {
* Allocate ring elements and chain them together
* One-to-one association of board descriptors with ring elements
*/
-static int skge_ring_alloc(struct skge_ring *ring, void *vaddr, u64 base)
+static int skge_ring_alloc(struct skge_ring *ring, void *vaddr, u32 base)
{
struct skge_tx_desc *d;
struct skge_element *e;
int i;
- ring->start = kmalloc(sizeof(*e)*ring->count, GFP_KERNEL);
+	ring->start = kcalloc(ring->count, sizeof(*e), GFP_KERNEL);
if (!ring->start)
return -ENOMEM;
for (i = 0, e = ring->start, d = vaddr; i < ring->count; i++, e++, d++) {
e->desc = d;
- e->skb = NULL;
if (i == ring->count - 1) {
e->next = ring->start;
d->next_offset = base;
@@ -2169,27 +2167,31 @@ static int skge_up(struct net_device *dev)
if (!skge->mem)
return -ENOMEM;
+ BUG_ON(skge->dma & 7);
+
+ if ((u64)skge->dma >> 32 != ((u64) skge->dma + skge->mem_size) >> 32) {
+ printk(KERN_ERR PFX "pci_alloc_consistent region crosses 4G boundary\n");
+ err = -EINVAL;
+ goto free_pci_mem;
+ }
+
memset(skge->mem, 0, skge->mem_size);
- if ((err = skge_ring_alloc(&skge->rx_ring, skge->mem, skge->dma)))
+ err = skge_ring_alloc(&skge->rx_ring, skge->mem, skge->dma);
+ if (err)
goto free_pci_mem;
err = skge_rx_fill(skge);
if (err)
goto free_rx_ring;
- if ((err = skge_ring_alloc(&skge->tx_ring, skge->mem + rx_size,
- skge->dma + rx_size)))
+ err = skge_ring_alloc(&skge->tx_ring, skge->mem + rx_size,
+ skge->dma + rx_size);
+ if (err)
goto free_rx_ring;
skge->tx_avail = skge->tx_ring.count - 1;
- /* Enable IRQ from port */
- spin_lock_irq(&hw->hw_lock);
- hw->intr_mask |= portirqmask[port];
- skge_write32(hw, B0_IMSK, hw->intr_mask);
- spin_unlock_irq(&hw->hw_lock);
-
/* Initialize MAC */
spin_lock_bh(&hw->phy_lock);
if (hw->chip_id == CHIP_ID_GENESIS)
@@ -2246,11 +2248,6 @@ static int skge_down(struct net_device *dev)
else
yukon_stop(skge);
- spin_lock_irq(&hw->hw_lock);
- hw->intr_mask &= ~portirqmask[skge->port];
- skge_write32(hw, B0_IMSK, hw->intr_mask);
- spin_unlock_irq(&hw->hw_lock);
-
/* Stop transmitter */
skge_write8(hw, Q_ADDR(txqaddr[port], Q_CSR), CSR_STOP);
skge_write32(hw, RB_ADDR(txqaddr[port], RB_CTRL),
@@ -2307,18 +2304,15 @@ static int skge_xmit_frame(struct sk_buff *skb, struct net_device *dev)
int i;
u32 control, len;
u64 map;
- unsigned long flags;
skb = skb_padto(skb, ETH_ZLEN);
if (!skb)
return NETDEV_TX_OK;
- local_irq_save(flags);
if (!spin_trylock(&skge->tx_lock)) {
- /* Collision - tell upper layer to requeue */
- local_irq_restore(flags);
- return NETDEV_TX_LOCKED;
- }
+ /* Collision - tell upper layer to requeue */
+ return NETDEV_TX_LOCKED;
+ }
if (unlikely(skge->tx_avail < skb_shinfo(skb)->nr_frags +1)) {
if (!netif_queue_stopped(dev)) {
@@ -2327,7 +2321,7 @@ static int skge_xmit_frame(struct sk_buff *skb, struct net_device *dev)
printk(KERN_WARNING PFX "%s: ring full when queue awake!\n",
dev->name);
}
- spin_unlock_irqrestore(&skge->tx_lock, flags);
+ spin_unlock(&skge->tx_lock);
return NETDEV_TX_BUSY;
}
@@ -2402,8 +2396,10 @@ static int skge_xmit_frame(struct sk_buff *skb, struct net_device *dev)
netif_stop_queue(dev);
}
+ mmiowb();
+ spin_unlock(&skge->tx_lock);
+
dev->trans_start = jiffies;
- spin_unlock_irqrestore(&skge->tx_lock, flags);
return NETDEV_TX_OK;
}
@@ -2416,7 +2412,7 @@ static inline void skge_tx_free(struct skge_hw *hw, struct skge_element *e)
pci_unmap_addr(e, mapaddr),
pci_unmap_len(e, maplen),
PCI_DMA_TODEVICE);
- dev_kfree_skb_any(e->skb);
+ dev_kfree_skb(e->skb);
e->skb = NULL;
} else {
pci_unmap_page(hw->pdev,
@@ -2430,15 +2426,14 @@ static void skge_tx_clean(struct skge_port *skge)
{
struct skge_ring *ring = &skge->tx_ring;
struct skge_element *e;
- unsigned long flags;
- spin_lock_irqsave(&skge->tx_lock, flags);
+ spin_lock_bh(&skge->tx_lock);
for (e = ring->to_clean; e != ring->to_use; e = e->next) {
++skge->tx_avail;
skge_tx_free(skge->hw, e);
}
ring->to_clean = e;
- spin_unlock_irqrestore(&skge->tx_lock, flags);
+ spin_unlock_bh(&skge->tx_lock);
}
static void skge_tx_timeout(struct net_device *dev)
@@ -2663,6 +2658,37 @@ resubmit:
return NULL;
}
+static void skge_tx_done(struct skge_port *skge)
+{
+ struct skge_ring *ring = &skge->tx_ring;
+ struct skge_element *e;
+
+ spin_lock(&skge->tx_lock);
+ for (e = ring->to_clean; prefetch(e->next), e != ring->to_use; e = e->next) {
+ struct skge_tx_desc *td = e->desc;
+ u32 control;
+
+ rmb();
+ control = td->control;
+ if (control & BMU_OWN)
+ break;
+
+ if (unlikely(netif_msg_tx_done(skge)))
+ printk(KERN_DEBUG PFX "%s: tx done slot %td status 0x%x\n",
+ skge->netdev->name, e - ring->start, td->status);
+
+ skge_tx_free(skge->hw, e);
+ e->skb = NULL;
+ ++skge->tx_avail;
+ }
+ ring->to_clean = e;
+ skge_write8(skge->hw, Q_ADDR(txqaddr[skge->port], Q_CSR), CSR_IRQ_CL_F);
+
+ if (skge->tx_avail > MAX_SKB_FRAGS + 1)
+ netif_wake_queue(skge->netdev);
+
+ spin_unlock(&skge->tx_lock);
+}
static int skge_poll(struct net_device *dev, int *budget)
{
@@ -2670,8 +2696,10 @@ static int skge_poll(struct net_device *dev, int *budget)
struct skge_hw *hw = skge->hw;
struct skge_ring *ring = &skge->rx_ring;
struct skge_element *e;
- unsigned int to_do = min(dev->quota, *budget);
- unsigned int work_done = 0;
+ int to_do = min(dev->quota, *budget);
+ int work_done = 0;
+
+ skge_tx_done(skge);
for (e = ring->to_clean; prefetch(e->next), work_done < to_do; e = e->next) {
struct skge_rx_desc *rd = e->desc;
@@ -2683,8 +2711,8 @@ static int skge_poll(struct net_device *dev, int *budget)
if (control & BMU_OWN)
break;
- skb = skge_rx_get(skge, e, control, rd->status,
- le16_to_cpu(rd->csum2));
+ skb = skge_rx_get(skge, e, control, rd->status,
+ le16_to_cpu(rd->csum2));
if (likely(skb)) {
dev->last_rx = jiffies;
netif_receive_skb(skb);
@@ -2705,49 +2733,15 @@ static int skge_poll(struct net_device *dev, int *budget)
if (work_done >= to_do)
return 1; /* not done */
- spin_lock_irq(&hw->hw_lock);
- __netif_rx_complete(dev);
- hw->intr_mask |= portirqmask[skge->port];
+ netif_rx_complete(dev);
+ mmiowb();
+
+ hw->intr_mask |= skge->port == 0 ? (IS_R1_F|IS_XA1_F) : (IS_R2_F|IS_XA2_F);
skge_write32(hw, B0_IMSK, hw->intr_mask);
- spin_unlock_irq(&hw->hw_lock);
return 0;
}
-static inline void skge_tx_intr(struct net_device *dev)
-{
- struct skge_port *skge = netdev_priv(dev);
- struct skge_hw *hw = skge->hw;
- struct skge_ring *ring = &skge->tx_ring;
- struct skge_element *e;
-
- spin_lock(&skge->tx_lock);
- for (e = ring->to_clean; prefetch(e->next), e != ring->to_use; e = e->next) {
- struct skge_tx_desc *td = e->desc;
- u32 control;
-
- rmb();
- control = td->control;
- if (control & BMU_OWN)
- break;
-
- if (unlikely(netif_msg_tx_done(skge)))
- printk(KERN_DEBUG PFX "%s: tx done slot %td status 0x%x\n",
- dev->name, e - ring->start, td->status);
-
- skge_tx_free(hw, e);
- e->skb = NULL;
- ++skge->tx_avail;
- }
- ring->to_clean = e;
- skge_write8(hw, Q_ADDR(txqaddr[skge->port], Q_CSR), CSR_IRQ_CL_F);
-
- if (skge->tx_avail > MAX_SKB_FRAGS + 1)
- netif_wake_queue(dev);
-
- spin_unlock(&skge->tx_lock);
-}
-
/* Parity errors seem to happen when Genesis is connected to a switch
* with no other ports present. Heartbeat error??
*/
@@ -2770,17 +2764,6 @@ static void skge_mac_parity(struct skge_hw *hw, int port)
? GMF_CLI_TX_FC : GMF_CLI_TX_PE);
}
-static void skge_pci_clear(struct skge_hw *hw)
-{
- u16 status;
-
- pci_read_config_word(hw->pdev, PCI_STATUS, &status);
- skge_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_ON);
- pci_write_config_word(hw->pdev, PCI_STATUS,
- status | PCI_STATUS_ERROR_BITS);
- skge_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_OFF);
-}
-
static void skge_mac_intr(struct skge_hw *hw, int port)
{
if (hw->chip_id == CHIP_ID_GENESIS)
@@ -2822,23 +2805,39 @@ static void skge_error_irq(struct skge_hw *hw)
if (hwstatus & IS_M2_PAR_ERR)
skge_mac_parity(hw, 1);
- if (hwstatus & IS_R1_PAR_ERR)
+ if (hwstatus & IS_R1_PAR_ERR) {
+ printk(KERN_ERR PFX "%s: receive queue parity error\n",
+ hw->dev[0]->name);
skge_write32(hw, B0_R1_CSR, CSR_IRQ_CL_P);
+ }
- if (hwstatus & IS_R2_PAR_ERR)
+ if (hwstatus & IS_R2_PAR_ERR) {
+ printk(KERN_ERR PFX "%s: receive queue parity error\n",
+ hw->dev[1]->name);
skge_write32(hw, B0_R2_CSR, CSR_IRQ_CL_P);
+ }
if (hwstatus & (IS_IRQ_MST_ERR|IS_IRQ_STAT)) {
- printk(KERN_ERR PFX "hardware error detected (status 0x%x)\n",
- hwstatus);
+ u16 pci_status, pci_cmd;
+
+ pci_read_config_word(hw->pdev, PCI_COMMAND, &pci_cmd);
+ pci_read_config_word(hw->pdev, PCI_STATUS, &pci_status);
- skge_pci_clear(hw);
+ printk(KERN_ERR PFX "%s: PCI error cmd=%#x status=%#x\n",
+ pci_name(hw->pdev), pci_cmd, pci_status);
+
+ /* Write the error bits back to clear them. */
+ pci_status &= PCI_STATUS_ERROR_BITS;
+ skge_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_ON);
+ pci_write_config_word(hw->pdev, PCI_COMMAND,
+ pci_cmd | PCI_COMMAND_SERR | PCI_COMMAND_PARITY);
+ pci_write_config_word(hw->pdev, PCI_STATUS, pci_status);
+ skge_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_OFF);
/* if error still set then just ignore it */
hwstatus = skge_read32(hw, B0_HWE_ISRC);
if (hwstatus & IS_IRQ_STAT) {
- pr_debug("IRQ status %x: still set ignoring hardware errors\n",
- hwstatus);
+			printk(KERN_INFO PFX "unable to clear errors (so ignoring them)\n");
hw->intr_mask &= ~IS_HW_ERR;
}
}
@@ -2855,12 +2854,11 @@ static void skge_extirq(unsigned long data)
int port;
spin_lock(&hw->phy_lock);
- for (port = 0; port < 2; port++) {
+ for (port = 0; port < hw->ports; port++) {
struct net_device *dev = hw->dev[port];
+ struct skge_port *skge = netdev_priv(dev);
- if (dev && netif_running(dev)) {
- struct skge_port *skge = netdev_priv(dev);
-
+ if (netif_running(dev)) {
if (hw->chip_id != CHIP_ID_GENESIS)
yukon_phy_intr(skge);
else
@@ -2869,38 +2867,39 @@ static void skge_extirq(unsigned long data)
}
spin_unlock(&hw->phy_lock);
- spin_lock_irq(&hw->hw_lock);
hw->intr_mask |= IS_EXT_REG;
skge_write32(hw, B0_IMSK, hw->intr_mask);
- spin_unlock_irq(&hw->hw_lock);
}
static irqreturn_t skge_intr(int irq, void *dev_id, struct pt_regs *regs)
{
struct skge_hw *hw = dev_id;
- u32 status = skge_read32(hw, B0_SP_ISRC);
+ u32 status;
- if (status == 0 || status == ~0) /* hotplug or shared irq */
+ /* Reading this register masks IRQ */
+ status = skge_read32(hw, B0_SP_ISRC);
+ if (status == 0)
return IRQ_NONE;
- spin_lock(&hw->hw_lock);
- if (status & IS_R1_F) {
+ if (status & IS_EXT_REG) {
+ hw->intr_mask &= ~IS_EXT_REG;
+ tasklet_schedule(&hw->ext_tasklet);
+ }
+
+ if (status & (IS_R1_F|IS_XA1_F)) {
skge_write8(hw, Q_ADDR(Q_R1, Q_CSR), CSR_IRQ_CL_F);
- hw->intr_mask &= ~IS_R1_F;
+ hw->intr_mask &= ~(IS_R1_F|IS_XA1_F);
netif_rx_schedule(hw->dev[0]);
}
- if (status & IS_R2_F) {
+ if (status & (IS_R2_F|IS_XA2_F)) {
skge_write8(hw, Q_ADDR(Q_R2, Q_CSR), CSR_IRQ_CL_F);
- hw->intr_mask &= ~IS_R2_F;
+ hw->intr_mask &= ~(IS_R2_F|IS_XA2_F);
netif_rx_schedule(hw->dev[1]);
}
- if (status & IS_XA1_F)
- skge_tx_intr(hw->dev[0]);
-
- if (status & IS_XA2_F)
- skge_tx_intr(hw->dev[1]);
+ if (likely((status & hw->intr_mask) == 0))
+ return IRQ_HANDLED;
if (status & IS_PA_TO_RX1) {
struct skge_port *skge = netdev_priv(hw->dev[0]);
@@ -2929,13 +2928,7 @@ static irqreturn_t skge_intr(int irq, void *dev_id, struct pt_regs *regs)
if (status & IS_HW_ERR)
skge_error_irq(hw);
- if (status & IS_EXT_REG) {
- hw->intr_mask &= ~IS_EXT_REG;
- tasklet_schedule(&hw->ext_tasklet);
- }
-
skge_write32(hw, B0_IMSK, hw->intr_mask);
- spin_unlock(&hw->hw_lock);
return IRQ_HANDLED;
}
@@ -3010,7 +3003,7 @@ static const char *skge_board_name(const struct skge_hw *hw)
static int skge_reset(struct skge_hw *hw)
{
u32 reg;
- u16 ctst;
+ u16 ctst, pci_status;
u8 t8, mac_cfg, pmd_type, phy_type;
int i;
@@ -3021,8 +3014,13 @@ static int skge_reset(struct skge_hw *hw)
skge_write8(hw, B0_CTST, CS_RST_CLR);
/* clear PCI errors, if any */
- skge_pci_clear(hw);
+ skge_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_ON);
+ skge_write8(hw, B2_TST_CTRL2, 0);
+ pci_read_config_word(hw->pdev, PCI_STATUS, &pci_status);
+ pci_write_config_word(hw->pdev, PCI_STATUS,
+ pci_status | PCI_STATUS_ERROR_BITS);
+ skge_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_OFF);
skge_write8(hw, B0_CTST, CS_MRST_CLR);
/* restore CLK_RUN bits (for Yukon-Lite) */
@@ -3081,7 +3079,10 @@ static int skge_reset(struct skge_hw *hw)
else
hw->ram_size = t8 * 4096;
- hw->intr_mask = IS_HW_ERR | IS_EXT_REG;
+ hw->intr_mask = IS_HW_ERR | IS_EXT_REG | IS_PORT_1;
+ if (hw->ports > 1)
+ hw->intr_mask |= IS_PORT_2;
+
if (hw->chip_id == CHIP_ID_GENESIS)
genesis_init(hw);
else {
@@ -3251,13 +3252,15 @@ static int __devinit skge_probe(struct pci_dev *pdev,
struct skge_hw *hw;
int err, using_dac = 0;
- if ((err = pci_enable_device(pdev))) {
+ err = pci_enable_device(pdev);
+ if (err) {
printk(KERN_ERR PFX "%s cannot enable PCI device\n",
pci_name(pdev));
goto err_out;
}
- if ((err = pci_request_regions(pdev, DRV_NAME))) {
+ err = pci_request_regions(pdev, DRV_NAME);
+ if (err) {
printk(KERN_ERR PFX "%s cannot obtain PCI resources\n",
pci_name(pdev));
goto err_out_disable_pdev;
@@ -3265,22 +3268,18 @@ static int __devinit skge_probe(struct pci_dev *pdev,
pci_set_master(pdev);
- if (sizeof(dma_addr_t) > sizeof(u32) &&
- !(err = pci_set_dma_mask(pdev, DMA_64BIT_MASK))) {
+ if (!pci_set_dma_mask(pdev, DMA_64BIT_MASK)) {
using_dac = 1;
err = pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK);
- if (err < 0) {
- printk(KERN_ERR PFX "%s unable to obtain 64 bit DMA "
- "for consistent allocations\n", pci_name(pdev));
- goto err_out_free_regions;
- }
- } else {
- err = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
- if (err) {
- printk(KERN_ERR PFX "%s no usable DMA configuration\n",
- pci_name(pdev));
- goto err_out_free_regions;
- }
+ } else if (!(err = pci_set_dma_mask(pdev, DMA_32BIT_MASK))) {
+ using_dac = 0;
+ err = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
+ }
+
+ if (err) {
+ printk(KERN_ERR PFX "%s no usable DMA configuration\n",
+ pci_name(pdev));
+ goto err_out_free_regions;
}
#ifdef __BIG_ENDIAN
@@ -3304,7 +3303,6 @@ static int __devinit skge_probe(struct pci_dev *pdev,
hw->pdev = pdev;
spin_lock_init(&hw->phy_lock);
- spin_lock_init(&hw->hw_lock);
tasklet_init(&hw->ext_tasklet, skge_extirq, (unsigned long) hw);
hw->regs = ioremap_nocache(pci_resource_start(pdev, 0), 0x4000);
@@ -3314,7 +3312,8 @@ static int __devinit skge_probe(struct pci_dev *pdev,
goto err_out_free_hw;
}
- if ((err = request_irq(pdev->irq, skge_intr, SA_SHIRQ, DRV_NAME, hw))) {
+ err = request_irq(pdev->irq, skge_intr, SA_SHIRQ, DRV_NAME, hw);
+ if (err) {
printk(KERN_ERR PFX "%s: cannot assign irq %d\n",
pci_name(pdev), pdev->irq);
goto err_out_iounmap;
@@ -3332,7 +3331,8 @@ static int __devinit skge_probe(struct pci_dev *pdev,
if ((dev = skge_devinit(hw, 0, using_dac)) == NULL)
goto err_out_led_off;
- if ((err = register_netdev(dev))) {
+ err = register_netdev(dev);
+ if (err) {
printk(KERN_ERR PFX "%s: cannot register net device\n",
pci_name(pdev));
goto err_out_free_netdev;
@@ -3387,7 +3387,6 @@ static void __devexit skge_remove(struct pci_dev *pdev)
skge_write32(hw, B0_IMSK, 0);
skge_write16(hw, B0_LED, LED_STAT_OFF);
- skge_pci_clear(hw);
skge_write8(hw, B0_CTST, CS_RST_SET);
tasklet_kill(&hw->ext_tasklet);
diff --git a/drivers/net/skge.h b/drivers/net/skge.h
index 941f12a333b..2efdacc290e 100644
--- a/drivers/net/skge.h
+++ b/drivers/net/skge.h
@@ -2402,7 +2402,6 @@ struct skge_hw {
struct tasklet_struct ext_tasklet;
spinlock_t phy_lock;
- spinlock_t hw_lock;
};
enum {
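
One detail worth calling out from the skge.c hunks further up: skge_ring_alloc() now takes a u32 base, so the next-descriptor offsets written into the ring are only 32 bits wide (presumably matching the hardware descriptor format), and skge_up() now rejects a pci_alloc_consistent() region whose start and end differ in their upper 32 bits, i.e. one that straddles a 4 GiB boundary. The check boils down to the comparison sketched below; the helper name is made up for illustration.

	#include <stdint.h>
	#include <stdio.h>

	/* Non-zero if [base, base + size) does not sit under a single 4 GiB
	 * boundary, i.e. the start and end differ in their upper 32 bits. */
	static int crosses_4g(uint64_t base, uint64_t size)
	{
		return (base >> 32) != ((base + size) >> 32);
	}

	int main(void)
	{
		printf("%d\n", crosses_4g(0xfffff000ULL, 0x2000));   /* 1: straddles 4 GiB */
		printf("%d\n", crosses_4g(0x100000000ULL, 0x2000));  /* 0: entirely above it */
		return 0;
	}
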
diff --git a/drivers/net/sky2.c b/drivers/net/sky2.c
index 73260364cba..f08fe6c884b 100644
--- a/drivers/net/sky2.c
+++ b/drivers/net/sky2.c
@@ -51,7 +51,7 @@
#include "sky2.h"
#define DRV_NAME "sky2"
-#define DRV_VERSION "0.15"
+#define DRV_VERSION "1.1"
#define PFX DRV_NAME " "
/*
@@ -61,10 +61,6 @@
* a receive requires one (or two if using 64 bit dma).
*/
-#define is_ec_a1(hw) \
- unlikely((hw)->chip_id == CHIP_ID_YUKON_EC && \
- (hw)->chip_rev == CHIP_REV_YU_EC_A1)
-
#define RX_LE_SIZE 512
#define RX_LE_BYTES (RX_LE_SIZE*sizeof(struct sky2_rx_le))
#define RX_MAX_PENDING (RX_LE_SIZE/2 - 2)
@@ -96,6 +92,10 @@ static int copybreak __read_mostly = 256;
module_param(copybreak, int, 0);
MODULE_PARM_DESC(copybreak, "Receive copy threshold");
+static int disable_msi = 0;
+module_param(disable_msi, int, 0);
+MODULE_PARM_DESC(disable_msi, "Disable Message Signaled Interrupt (MSI)");
+
static const struct pci_device_id sky2_id_table[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, 0x9000) },
{ PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, 0x9E00) },
@@ -504,9 +504,9 @@ static void sky2_phy_init(struct sky2_hw *hw, unsigned port)
/* Force a renegotiation */
static void sky2_phy_reinit(struct sky2_port *sky2)
{
- down(&sky2->phy_sema);
+ spin_lock_bh(&sky2->phy_lock);
sky2_phy_init(sky2->hw, sky2->port);
- up(&sky2->phy_sema);
+ spin_unlock_bh(&sky2->phy_lock);
}
static void sky2_mac_init(struct sky2_hw *hw, unsigned port)
@@ -571,9 +571,9 @@ static void sky2_mac_init(struct sky2_hw *hw, unsigned port)
sky2_read16(hw, SK_REG(port, GMAC_IRQ_SRC));
- down(&sky2->phy_sema);
+ spin_lock_bh(&sky2->phy_lock);
sky2_phy_init(hw, port);
- up(&sky2->phy_sema);
+ spin_unlock_bh(&sky2->phy_lock);
/* MIB clear */
reg = gma_read16(hw, port, GM_PHY_ADDR);
@@ -725,37 +725,11 @@ static inline struct sky2_tx_le *get_tx_le(struct sky2_port *sky2)
return le;
}
-/*
- * This is a workaround code taken from SysKonnect sk98lin driver
- * to deal with chip bug on Yukon EC rev 0 in the wraparound case.
- */
-static void sky2_put_idx(struct sky2_hw *hw, unsigned q,
- u16 idx, u16 *last, u16 size)
+/* Update chip's next pointer */
+static inline void sky2_put_idx(struct sky2_hw *hw, unsigned q, u16 idx)
{
wmb();
- if (is_ec_a1(hw) && idx < *last) {
- u16 hwget = sky2_read16(hw, Y2_QADDR(q, PREF_UNIT_GET_IDX));
-
- if (hwget == 0) {
- /* Start prefetching again */
- sky2_write8(hw, Y2_QADDR(q, PREF_UNIT_FIFO_WM), 0xe0);
- goto setnew;
- }
-
- if (hwget == size - 1) {
- /* set watermark to one list element */
- sky2_write8(hw, Y2_QADDR(q, PREF_UNIT_FIFO_WM), 8);
-
- /* set put index to first list element */
- sky2_write16(hw, Y2_QADDR(q, PREF_UNIT_PUT_IDX), 0);
- } else /* have hardware go to end of list */
- sky2_write16(hw, Y2_QADDR(q, PREF_UNIT_PUT_IDX),
- size - 1);
- } else {
-setnew:
- sky2_write16(hw, Y2_QADDR(q, PREF_UNIT_PUT_IDX), idx);
- }
- *last = idx;
+ sky2_write16(hw, Y2_QADDR(q, PREF_UNIT_PUT_IDX), idx);
mmiowb();
}
@@ -878,7 +852,7 @@ static int sky2_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
if (!netif_running(dev))
return -ENODEV; /* Phy still in reset */
- switch(cmd) {
+ switch (cmd) {
case SIOCGMIIPHY:
data->phy_id = PHY_ADDR_MARV;
@@ -886,9 +860,9 @@ static int sky2_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
case SIOCGMIIREG: {
u16 val = 0;
- down(&sky2->phy_sema);
+ spin_lock_bh(&sky2->phy_lock);
err = __gm_phy_read(hw, sky2->port, data->reg_num & 0x1f, &val);
- up(&sky2->phy_sema);
+ spin_unlock_bh(&sky2->phy_lock);
data->val_out = val;
break;
@@ -898,10 +872,10 @@ static int sky2_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
if (!capable(CAP_NET_ADMIN))
return -EPERM;
- down(&sky2->phy_sema);
+ spin_lock_bh(&sky2->phy_lock);
err = gm_phy_write(hw, sky2->port, data->reg_num & 0x1f,
data->val_in);
- up(&sky2->phy_sema);
+ spin_unlock_bh(&sky2->phy_lock);
break;
}
return err;
@@ -1001,7 +975,6 @@ static int sky2_rx_start(struct sky2_port *sky2)
/* Tell chip about available buffers */
sky2_write16(hw, Y2_QADDR(rxq, PREF_UNIT_PUT_IDX), sky2->rx_put);
- sky2->rx_last_put = sky2_read16(hw, Y2_QADDR(rxq, PREF_UNIT_PUT_IDX));
return 0;
nomem:
sky2_rx_clean(sky2);
@@ -1014,7 +987,7 @@ static int sky2_up(struct net_device *dev)
struct sky2_port *sky2 = netdev_priv(dev);
struct sky2_hw *hw = sky2->hw;
unsigned port = sky2->port;
- u32 ramsize, rxspace;
+ u32 ramsize, rxspace, imask;
int err = -ENOMEM;
if (netif_msg_ifup(sky2))
@@ -1079,10 +1052,10 @@ static int sky2_up(struct net_device *dev)
goto err_out;
/* Enable interrupts from phy/mac for port */
- spin_lock_irq(&hw->hw_lock);
- hw->intr_mask |= (port == 0) ? Y2_IS_PORT_1 : Y2_IS_PORT_2;
- sky2_write32(hw, B0_IMSK, hw->intr_mask);
- spin_unlock_irq(&hw->hw_lock);
+ imask = sky2_read32(hw, B0_IMSK);
+ imask |= (port == 0) ? Y2_IS_PORT_1 : Y2_IS_PORT_2;
+ sky2_write32(hw, B0_IMSK, imask);
+
return 0;
err_out:
@@ -1299,8 +1272,7 @@ static int sky2_xmit_frame(struct sk_buff *skb, struct net_device *dev)
netif_stop_queue(dev);
}
- sky2_put_idx(hw, txqaddr[sky2->port], sky2->tx_prod,
- &sky2->tx_last_put, TX_RING_SIZE);
+ sky2_put_idx(hw, txqaddr[sky2->port], sky2->tx_prod);
out_unlock:
spin_unlock(&sky2->tx_lock);
@@ -1332,7 +1304,7 @@ static void sky2_tx_complete(struct sky2_port *sky2, u16 done)
struct tx_ring_info *re = sky2->tx_ring + put;
struct sk_buff *skb = re->skb;
- nxt = re->idx;
+ nxt = re->idx;
BUG_ON(nxt >= TX_RING_SIZE);
prefetch(sky2->tx_ring + nxt);
@@ -1348,7 +1320,7 @@ static void sky2_tx_complete(struct sky2_port *sky2, u16 done)
struct tx_ring_info *fre;
fre = sky2->tx_ring + (put + i + 1) % TX_RING_SIZE;
pci_unmap_page(pdev, pci_unmap_addr(fre, mapaddr),
- skb_shinfo(skb)->frags[i].size,
+ skb_shinfo(skb)->frags[i].size,
PCI_DMA_TODEVICE);
}
@@ -1356,7 +1328,7 @@ static void sky2_tx_complete(struct sky2_port *sky2, u16 done)
}
sky2->tx_cons = put;
- if (netif_queue_stopped(dev) && tx_avail(sky2) > MAX_SKB_TX_LE)
+ if (tx_avail(sky2) > MAX_SKB_TX_LE)
netif_wake_queue(dev);
}
@@ -1375,6 +1347,7 @@ static int sky2_down(struct net_device *dev)
struct sky2_hw *hw = sky2->hw;
unsigned port = sky2->port;
u16 ctrl;
+ u32 imask;
/* Never really got started! */
if (!sky2->tx_le)
@@ -1386,14 +1359,6 @@ static int sky2_down(struct net_device *dev)
/* Stop more packets from being queued */
netif_stop_queue(dev);
- /* Disable port IRQ */
- spin_lock_irq(&hw->hw_lock);
- hw->intr_mask &= ~((sky2->port == 0) ? Y2_IS_IRQ_PHY1 : Y2_IS_IRQ_PHY2);
- sky2_write32(hw, B0_IMSK, hw->intr_mask);
- spin_unlock_irq(&hw->hw_lock);
-
- flush_scheduled_work();
-
sky2_phy_reset(hw, port);
/* Stop transmitter */
@@ -1437,6 +1402,11 @@ static int sky2_down(struct net_device *dev)
sky2_write8(hw, SK_REG(port, RX_GMF_CTRL_T), GMF_RST_SET);
sky2_write8(hw, SK_REG(port, TX_GMF_CTRL_T), GMF_RST_SET);
+ /* Disable port IRQ */
+ imask = sky2_read32(hw, B0_IMSK);
+	imask &= ~((sky2->port == 0) ? Y2_IS_PORT_1 : Y2_IS_PORT_2);
+ sky2_write32(hw, B0_IMSK, imask);
+
/* turn off LED's */
sky2_write16(hw, B0_Y2LED, LED_STAT_OFF);
@@ -1631,20 +1601,19 @@ static int sky2_autoneg_done(struct sky2_port *sky2, u16 aux)
return 0;
}
-/*
- * Interrupt from PHY are handled outside of interrupt context
- * because accessing phy registers requires spin wait which might
- * cause excess interrupt latency.
- */
-static void sky2_phy_task(void *arg)
+/* Interrupt from PHY */
+static void sky2_phy_intr(struct sky2_hw *hw, unsigned port)
{
- struct sky2_port *sky2 = arg;
- struct sky2_hw *hw = sky2->hw;
+ struct net_device *dev = hw->dev[port];
+ struct sky2_port *sky2 = netdev_priv(dev);
u16 istatus, phystat;
- down(&sky2->phy_sema);
- istatus = gm_phy_read(hw, sky2->port, PHY_MARV_INT_STAT);
- phystat = gm_phy_read(hw, sky2->port, PHY_MARV_PHY_STAT);
+ spin_lock(&sky2->phy_lock);
+ istatus = gm_phy_read(hw, port, PHY_MARV_INT_STAT);
+ phystat = gm_phy_read(hw, port, PHY_MARV_PHY_STAT);
+
+ if (!netif_running(dev))
+ goto out;
if (netif_msg_intr(sky2))
printk(KERN_INFO PFX "%s: phy interrupt status 0x%x 0x%x\n",
@@ -1670,12 +1639,7 @@ static void sky2_phy_task(void *arg)
sky2_link_down(sky2);
}
out:
- up(&sky2->phy_sema);
-
- spin_lock_irq(&hw->hw_lock);
- hw->intr_mask |= (sky2->port == 0) ? Y2_IS_IRQ_PHY1 : Y2_IS_IRQ_PHY2;
- sky2_write32(hw, B0_IMSK, hw->intr_mask);
- spin_unlock_irq(&hw->hw_lock);
+ spin_unlock(&sky2->phy_lock);
}
@@ -1687,31 +1651,40 @@ static void sky2_tx_timeout(struct net_device *dev)
struct sky2_port *sky2 = netdev_priv(dev);
struct sky2_hw *hw = sky2->hw;
unsigned txq = txqaddr[sky2->port];
- u16 ridx;
-
- /* Maybe we just missed an status interrupt */
- spin_lock(&sky2->tx_lock);
- ridx = sky2_read16(hw,
- sky2->port == 0 ? STAT_TXA1_RIDX : STAT_TXA2_RIDX);
- sky2_tx_complete(sky2, ridx);
- spin_unlock(&sky2->tx_lock);
-
- if (!netif_queue_stopped(dev)) {
- if (net_ratelimit())
- pr_info(PFX "transmit interrupt missed? recovered\n");
- return;
- }
+ u16 report, done;
if (netif_msg_timer(sky2))
printk(KERN_ERR PFX "%s: tx timeout\n", dev->name);
- sky2_write32(hw, Q_ADDR(txq, Q_CSR), BMU_STOP);
- sky2_write32(hw, Y2_QADDR(txq, PREF_UNIT_CTRL), PREF_UNIT_RST_SET);
+ report = sky2_read16(hw, sky2->port == 0 ? STAT_TXA1_RIDX : STAT_TXA2_RIDX);
+ done = sky2_read16(hw, Q_ADDR(txq, Q_DONE));
- sky2_tx_clean(sky2);
+ printk(KERN_DEBUG PFX "%s: transmit ring %u .. %u report=%u done=%u\n",
+ dev->name,
+ sky2->tx_cons, sky2->tx_prod, report, done);
- sky2_qset(hw, txq);
- sky2_prefetch_init(hw, txq, sky2->tx_le_map, TX_RING_SIZE - 1);
+ if (report != done) {
+ printk(KERN_INFO PFX "status burst pending (irq moderation?)\n");
+
+ sky2_write8(hw, STAT_TX_TIMER_CTRL, TIM_STOP);
+ sky2_write8(hw, STAT_TX_TIMER_CTRL, TIM_START);
+ } else if (report != sky2->tx_cons) {
+ printk(KERN_INFO PFX "status report lost?\n");
+
+ spin_lock_bh(&sky2->tx_lock);
+ sky2_tx_complete(sky2, report);
+ spin_unlock_bh(&sky2->tx_lock);
+ } else {
+ printk(KERN_INFO PFX "hardware hung? flushing\n");
+
+ sky2_write32(hw, Q_ADDR(txq, Q_CSR), BMU_STOP);
+ sky2_write32(hw, Y2_QADDR(txq, PREF_UNIT_CTRL), PREF_UNIT_RST_SET);
+
+ sky2_tx_clean(sky2);
+
+ sky2_qset(hw, txq);
+ sky2_prefetch_init(hw, txq, sky2->tx_le_map, TX_RING_SIZE - 1);
+ }
}
@@ -1730,6 +1703,7 @@ static int sky2_change_mtu(struct net_device *dev, int new_mtu)
struct sky2_hw *hw = sky2->hw;
int err;
u16 ctl, mode;
+ u32 imask;
if (new_mtu < ETH_ZLEN || new_mtu > ETH_JUMBO_MTU)
return -EINVAL;
@@ -1742,12 +1716,15 @@ static int sky2_change_mtu(struct net_device *dev, int new_mtu)
return 0;
}
+ imask = sky2_read32(hw, B0_IMSK);
sky2_write32(hw, B0_IMSK, 0);
dev->trans_start = jiffies; /* prevent tx timeout */
netif_stop_queue(dev);
netif_poll_disable(hw->dev[0]);
+ synchronize_irq(hw->pdev->irq);
+
ctl = gma_read16(hw, sky2->port, GM_GP_CTRL);
gma_write16(hw, sky2->port, GM_GP_CTRL, ctl & ~GM_GPCR_RX_ENA);
sky2_rx_stop(sky2);
@@ -1766,7 +1743,7 @@ static int sky2_change_mtu(struct net_device *dev, int new_mtu)
sky2_write8(hw, RB_ADDR(rxqaddr[sky2->port], RB_CTRL), RB_ENA_OP_MD);
err = sky2_rx_start(sky2);
- sky2_write32(hw, B0_IMSK, hw->intr_mask);
+ sky2_write32(hw, B0_IMSK, imask);
if (err)
dev_close(dev);
@@ -1843,8 +1820,7 @@ resubmit:
sky2_rx_add(sky2, re->mapaddr);
/* Tell receiver about new buffers. */
- sky2_put_idx(sky2->hw, rxqaddr[sky2->port], sky2->rx_put,
- &sky2->rx_last_put, RX_LE_SIZE);
+ sky2_put_idx(sky2->hw, rxqaddr[sky2->port], sky2->rx_put);
return skb;
@@ -1871,76 +1847,51 @@ error:
goto resubmit;
}
-/*
- * Check for transmit complete
- */
-#define TX_NO_STATUS 0xffff
-
-static void sky2_tx_check(struct sky2_hw *hw, int port, u16 last)
+/* Transmit complete */
+static inline void sky2_tx_done(struct net_device *dev, u16 last)
{
- if (last != TX_NO_STATUS) {
- struct net_device *dev = hw->dev[port];
- if (dev && netif_running(dev)) {
- struct sky2_port *sky2 = netdev_priv(dev);
+ struct sky2_port *sky2 = netdev_priv(dev);
- spin_lock(&sky2->tx_lock);
- sky2_tx_complete(sky2, last);
- spin_unlock(&sky2->tx_lock);
- }
+ if (netif_running(dev)) {
+ spin_lock(&sky2->tx_lock);
+ sky2_tx_complete(sky2, last);
+ spin_unlock(&sky2->tx_lock);
}
}
-/*
- * Both ports share the same status interrupt, therefore there is only
- * one poll routine.
- */
-static int sky2_poll(struct net_device *dev0, int *budget)
+/* Process status response ring */
+static int sky2_status_intr(struct sky2_hw *hw, int to_do)
{
- struct sky2_hw *hw = ((struct sky2_port *) netdev_priv(dev0))->hw;
- unsigned int to_do = min(dev0->quota, *budget);
- unsigned int work_done = 0;
- u16 hwidx;
- u16 tx_done[2] = { TX_NO_STATUS, TX_NO_STATUS };
-
- sky2_write32(hw, STAT_CTRL, SC_STAT_CLR_IRQ);
-
- /*
- * Kick the STAT_LEV_TIMER_CTRL timer.
- * This fixes my hangs on Yukon-EC (0xb6) rev 1.
- * The if clause is there to start the timer only if it has been
- * configured correctly and not been disabled via ethtool.
- */
- if (sky2_read8(hw, STAT_LEV_TIMER_CTRL) == TIM_START) {
- sky2_write8(hw, STAT_LEV_TIMER_CTRL, TIM_STOP);
- sky2_write8(hw, STAT_LEV_TIMER_CTRL, TIM_START);
- }
+ int work_done = 0;
- hwidx = sky2_read16(hw, STAT_PUT_IDX);
- BUG_ON(hwidx >= STATUS_RING_SIZE);
rmb();
- while (hwidx != hw->st_idx) {
+	for (;;) {
struct sky2_status_le *le = hw->st_le + hw->st_idx;
struct net_device *dev;
struct sky2_port *sky2;
struct sk_buff *skb;
u32 status;
u16 length;
+ u8 link, opcode;
+
+ opcode = le->opcode;
+ if (!opcode)
+ break;
+ opcode &= ~HW_OWNER;
- le = hw->st_le + hw->st_idx;
hw->st_idx = (hw->st_idx + 1) % STATUS_RING_SIZE;
- prefetch(hw->st_le + hw->st_idx);
+ le->opcode = 0;
- BUG_ON(le->link >= 2);
- dev = hw->dev[le->link];
- if (dev == NULL || !netif_running(dev))
- continue;
+ link = le->link;
+ BUG_ON(link >= 2);
+ dev = hw->dev[link];
sky2 = netdev_priv(dev);
- status = le32_to_cpu(le->status);
- length = le16_to_cpu(le->length);
+ length = le->length;
+ status = le->status;
- switch (le->opcode & ~HW_OWNER) {
+ switch (opcode) {
case OP_RXSTAT:
skb = sky2_receive(sky2, length, status);
if (!skb)
@@ -1980,42 +1931,23 @@ static int sky2_poll(struct net_device *dev0, int *budget)
case OP_TXINDEXLE:
/* TX index reports status for both ports */
- tx_done[0] = status & 0xffff;
- tx_done[1] = ((status >> 24) & 0xff)
- | (u16)(length & 0xf) << 8;
+ sky2_tx_done(hw->dev[0], status & 0xffff);
+ if (hw->dev[1])
+ sky2_tx_done(hw->dev[1],
+ ((status >> 24) & 0xff)
+ | (u16)(length & 0xf) << 8);
break;
default:
if (net_ratelimit())
printk(KERN_WARNING PFX
- "unknown status opcode 0x%x\n", le->opcode);
+ "unknown status opcode 0x%x\n", opcode);
break;
}
}
exit_loop:
- sky2_tx_check(hw, 0, tx_done[0]);
- sky2_tx_check(hw, 1, tx_done[1]);
-
- if (sky2_read8(hw, STAT_TX_TIMER_CTRL) == TIM_START) {
- sky2_write8(hw, STAT_TX_TIMER_CTRL, TIM_STOP);
- sky2_write8(hw, STAT_TX_TIMER_CTRL, TIM_START);
- }
-
- if (likely(work_done < to_do)) {
- spin_lock_irq(&hw->hw_lock);
- __netif_rx_complete(dev0);
-
- hw->intr_mask |= Y2_IS_STAT_BMU;
- sky2_write32(hw, B0_IMSK, hw->intr_mask);
- spin_unlock_irq(&hw->hw_lock);
-
- return 0;
- } else {
- *budget -= work_done;
- dev0->quota -= work_done;
- return 1;
- }
+ return work_done;
}
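The OP_TXINDEXLE case above packs transmit-completion indices for both ports into a single status entry: port 0's index is the low 16 bits of status, and port 1's index is assembled from bits 24-31 of status plus the low nibble of length. A minimal stand-alone C sketch of that decoding (the helper name and sample values are hypothetical; only the bit layout is taken from the hunk above):

#include <stdint.h>
#include <stdio.h>

/*
 * Decode an OP_TXINDEXLE-style status entry into per-port TX indices.
 * Bit layout as in sky2_status_intr() above:
 *   port 0 index = status[15:0]
 *   port 1 index = status[31:24] | (length[3:0] << 8)
 */
static void decode_txindexle(uint32_t status, uint16_t length,
			     uint16_t *tx_done0, uint16_t *tx_done1)
{
	*tx_done0 = status & 0xffff;
	*tx_done1 = ((status >> 24) & 0xff) | (uint16_t)(length & 0xf) << 8;
}

int main(void)
{
	uint16_t p0, p1;

	decode_txindexle(0x2a00013f, 0x0007, &p0, &p1);
	printf("port 0 done up to %u, port 1 done up to %u\n", p0, p1);
	return 0;
}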
static void sky2_hw_error(struct sky2_hw *hw, unsigned port, u32 status)
@@ -2134,57 +2066,97 @@ static void sky2_mac_intr(struct sky2_hw *hw, unsigned port)
}
}
-static void sky2_phy_intr(struct sky2_hw *hw, unsigned port)
+/* This should never happen; it is a fatal situation */
+static void sky2_descriptor_error(struct sky2_hw *hw, unsigned port,
+ const char *rxtx, u32 mask)
{
struct net_device *dev = hw->dev[port];
struct sky2_port *sky2 = netdev_priv(dev);
+ u32 imask;
+
+ printk(KERN_ERR PFX "%s: %s descriptor error (hardware problem)\n",
+ dev ? dev->name : "<not registered>", rxtx);
- hw->intr_mask &= ~(port == 0 ? Y2_IS_IRQ_PHY1 : Y2_IS_IRQ_PHY2);
- sky2_write32(hw, B0_IMSK, hw->intr_mask);
+ imask = sky2_read32(hw, B0_IMSK);
+ imask &= ~mask;
+ sky2_write32(hw, B0_IMSK, imask);
- schedule_work(&sky2->phy_task);
+ if (dev) {
+ spin_lock(&sky2->phy_lock);
+ sky2_link_down(sky2);
+ spin_unlock(&sky2->phy_lock);
+ }
}
-static irqreturn_t sky2_intr(int irq, void *dev_id, struct pt_regs *regs)
+static int sky2_poll(struct net_device *dev0, int *budget)
{
- struct sky2_hw *hw = dev_id;
- struct net_device *dev0 = hw->dev[0];
- u32 status;
+ struct sky2_hw *hw = ((struct sky2_port *) netdev_priv(dev0))->hw;
+ int work_limit = min(dev0->quota, *budget);
+ int work_done = 0;
+ u32 status = sky2_read32(hw, B0_Y2_SP_EISR);
- status = sky2_read32(hw, B0_Y2_SP_ISRC2);
- if (status == 0 || status == ~0)
- return IRQ_NONE;
+ if (unlikely(status & ~Y2_IS_STAT_BMU)) {
+ if (status & Y2_IS_HW_ERR)
+ sky2_hw_intr(hw);
- spin_lock(&hw->hw_lock);
- if (status & Y2_IS_HW_ERR)
- sky2_hw_intr(hw);
+ if (status & Y2_IS_IRQ_PHY1)
+ sky2_phy_intr(hw, 0);
- /* Do NAPI for Rx and Tx status */
- if (status & Y2_IS_STAT_BMU) {
- hw->intr_mask &= ~Y2_IS_STAT_BMU;
- sky2_write32(hw, B0_IMSK, hw->intr_mask);
+ if (status & Y2_IS_IRQ_PHY2)
+ sky2_phy_intr(hw, 1);
- if (likely(__netif_rx_schedule_prep(dev0))) {
- prefetch(&hw->st_le[hw->st_idx]);
- __netif_rx_schedule(dev0);
- }
+ if (status & Y2_IS_IRQ_MAC1)
+ sky2_mac_intr(hw, 0);
+
+ if (status & Y2_IS_IRQ_MAC2)
+ sky2_mac_intr(hw, 1);
+
+ if (status & Y2_IS_CHK_RX1)
+ sky2_descriptor_error(hw, 0, "receive", Y2_IS_CHK_RX1);
+
+ if (status & Y2_IS_CHK_RX2)
+ sky2_descriptor_error(hw, 1, "receive", Y2_IS_CHK_RX2);
+
+ if (status & Y2_IS_CHK_TXA1)
+ sky2_descriptor_error(hw, 0, "transmit", Y2_IS_CHK_TXA1);
+
+ if (status & Y2_IS_CHK_TXA2)
+ sky2_descriptor_error(hw, 1, "transmit", Y2_IS_CHK_TXA2);
}
- if (status & Y2_IS_IRQ_PHY1)
- sky2_phy_intr(hw, 0);
+ if (status & Y2_IS_STAT_BMU) {
+ work_done = sky2_status_intr(hw, work_limit);
+ *budget -= work_done;
+ dev0->quota -= work_done;
+
+ if (work_done >= work_limit)
+ return 1;
- if (status & Y2_IS_IRQ_PHY2)
- sky2_phy_intr(hw, 1);
+ sky2_write32(hw, STAT_CTRL, SC_STAT_CLR_IRQ);
+ }
- if (status & Y2_IS_IRQ_MAC1)
- sky2_mac_intr(hw, 0);
+ netif_rx_complete(dev0);
- if (status & Y2_IS_IRQ_MAC2)
- sky2_mac_intr(hw, 1);
+ status = sky2_read32(hw, B0_Y2_SP_LISR);
+ return 0;
+}
- sky2_write32(hw, B0_Y2_SP_ICR, 2);
+static irqreturn_t sky2_intr(int irq, void *dev_id, struct pt_regs *regs)
+{
+ struct sky2_hw *hw = dev_id;
+ struct net_device *dev0 = hw->dev[0];
+ u32 status;
- spin_unlock(&hw->hw_lock);
+ /* Reading this register masks interrupts as a side effect */
+ status = sky2_read32(hw, B0_Y2_SP_ISRC2);
+ if (status == 0 || status == ~0)
+ return IRQ_NONE;
+
+ prefetch(&hw->st_le[hw->st_idx]);
+ if (likely(__netif_rx_schedule_prep(dev0)))
+ __netif_rx_schedule(dev0);
+ else
+ printk(KERN_DEBUG PFX "irq race detected\n");
return IRQ_HANDLED;
}
@@ -2238,6 +2210,23 @@ static int sky2_reset(struct sky2_hw *hw)
return -EOPNOTSUPP;
}
+ hw->chip_rev = (sky2_read8(hw, B2_MAC_CFG) & CFG_CHIP_R_MSK) >> 4;
+
+ /* This rev is really old, and requires untested workarounds */
+ if (hw->chip_id == CHIP_ID_YUKON_EC && hw->chip_rev == CHIP_REV_YU_EC_A1) {
+ printk(KERN_ERR PFX "%s: unsupported revision Yukon-%s (0x%x) rev %d\n",
+ pci_name(hw->pdev), yukon2_name[hw->chip_id - CHIP_ID_YUKON_XL],
+ hw->chip_id, hw->chip_rev);
+ return -EOPNOTSUPP;
+ }
+
+ /* This chip is new and not tested yet */
+ if (hw->chip_id == CHIP_ID_YUKON_EC_U) {
+ pr_info(PFX "%s: this version of the Yukon 2 chipset has not been tested yet.\n",
+ pci_name(hw->pdev));
+ pr_info("Please report success/failure to maintainer <shemminger@osdl.org>\n");
+ }
+
/* disable ASF */
if (hw->chip_id <= CHIP_ID_YUKON_EC) {
sky2_write8(hw, B28_Y2_ASF_STAT_CMD, Y2_ASF_RESET);
@@ -2258,7 +2247,7 @@ static int sky2_reset(struct sky2_hw *hw)
sky2_write8(hw, B0_CTST, CS_MRST_CLR);
/* clear any PEX errors */
- if (pci_find_capability(hw->pdev, PCI_CAP_ID_EXP))
+ if (pci_find_capability(hw->pdev, PCI_CAP_ID_EXP))
sky2_pci_write32(hw, PEX_UNC_ERR_STAT, 0xffffffffUL);
@@ -2271,7 +2260,6 @@ static int sky2_reset(struct sky2_hw *hw)
if (!(sky2_read8(hw, B2_Y2_CLK_GATE) & Y2_STATUS_LNK2_INAC))
++hw->ports;
}
- hw->chip_rev = (sky2_read8(hw, B2_MAC_CFG) & CFG_CHIP_R_MSK) >> 4;
sky2_set_power_state(hw, PCI_D0);
@@ -2337,30 +2325,18 @@ static int sky2_reset(struct sky2_hw *hw)
/* Set the list last index */
sky2_write16(hw, STAT_LAST_IDX, STATUS_RING_SIZE - 1);
- /* These status setup values are copied from SysKonnect's driver */
- if (is_ec_a1(hw)) {
- /* WA for dev. #4.3 */
- sky2_write16(hw, STAT_TX_IDX_TH, 0xfff); /* Tx Threshold */
-
- /* set Status-FIFO watermark */
- sky2_write8(hw, STAT_FIFO_WM, 0x21); /* WA for dev. #4.18 */
+ sky2_write16(hw, STAT_TX_IDX_TH, 10);
+ sky2_write8(hw, STAT_FIFO_WM, 16);
- /* set Status-FIFO ISR watermark */
- sky2_write8(hw, STAT_FIFO_ISR_WM, 0x07); /* WA for dev. #4.18 */
- sky2_write32(hw, STAT_TX_TIMER_INI, sky2_us2clk(hw, 10000));
- } else {
- sky2_write16(hw, STAT_TX_IDX_TH, 10);
- sky2_write8(hw, STAT_FIFO_WM, 16);
-
- /* set Status-FIFO ISR watermark */
- if (hw->chip_id == CHIP_ID_YUKON_XL && hw->chip_rev == 0)
- sky2_write8(hw, STAT_FIFO_ISR_WM, 4);
- else
- sky2_write8(hw, STAT_FIFO_ISR_WM, 16);
+ /* set Status-FIFO ISR watermark */
+ if (hw->chip_id == CHIP_ID_YUKON_XL && hw->chip_rev == 0)
+ sky2_write8(hw, STAT_FIFO_ISR_WM, 4);
+ else
+ sky2_write8(hw, STAT_FIFO_ISR_WM, 16);
- sky2_write32(hw, STAT_TX_TIMER_INI, sky2_us2clk(hw, 1000));
- sky2_write32(hw, STAT_ISR_TIMER_INI, sky2_us2clk(hw, 7));
- }
+ sky2_write32(hw, STAT_TX_TIMER_INI, sky2_us2clk(hw, 1000));
+ sky2_write32(hw, STAT_ISR_TIMER_INI, sky2_us2clk(hw, 20));
+ sky2_write32(hw, STAT_LEV_TIMER_INI, sky2_us2clk(hw, 100));
/* enable status unit */
sky2_write32(hw, STAT_CTRL, SC_STAT_OP_ON);
@@ -2743,7 +2719,7 @@ static int sky2_phys_id(struct net_device *dev, u32 data)
ms = data * 1000;
/* save initial values */
- down(&sky2->phy_sema);
+ spin_lock_bh(&sky2->phy_lock);
if (hw->chip_id == CHIP_ID_YUKON_XL) {
u16 pg = gm_phy_read(hw, port, PHY_MARV_EXT_ADR);
gm_phy_write(hw, port, PHY_MARV_EXT_ADR, 3);
@@ -2759,9 +2735,9 @@ static int sky2_phys_id(struct net_device *dev, u32 data)
sky2_led(hw, port, onoff);
onoff = !onoff;
- up(&sky2->phy_sema);
+ spin_unlock_bh(&sky2->phy_lock);
interrupted = msleep_interruptible(250);
- down(&sky2->phy_sema);
+ spin_lock_bh(&sky2->phy_lock);
ms -= 250;
}
@@ -2776,7 +2752,7 @@ static int sky2_phys_id(struct net_device *dev, u32 data)
gm_phy_write(hw, port, PHY_MARV_LED_CTRL, ledctrl);
gm_phy_write(hw, port, PHY_MARV_LED_OVER, ledover);
}
- up(&sky2->phy_sema);
+ spin_unlock_bh(&sky2->phy_lock);
return 0;
}
@@ -2806,38 +2782,6 @@ static int sky2_set_pauseparam(struct net_device *dev,
return err;
}
-#ifdef CONFIG_PM
-static void sky2_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
-{
- struct sky2_port *sky2 = netdev_priv(dev);
-
- wol->supported = WAKE_MAGIC;
- wol->wolopts = sky2->wol ? WAKE_MAGIC : 0;
-}
-
-static int sky2_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
-{
- struct sky2_port *sky2 = netdev_priv(dev);
- struct sky2_hw *hw = sky2->hw;
-
- if (wol->wolopts != WAKE_MAGIC && wol->wolopts != 0)
- return -EOPNOTSUPP;
-
- sky2->wol = wol->wolopts == WAKE_MAGIC;
-
- if (sky2->wol) {
- memcpy_toio(hw->regs + WOL_MAC_ADDR, dev->dev_addr, ETH_ALEN);
-
- sky2_write16(hw, WOL_CTRL_STAT,
- WOL_CTL_ENA_PME_ON_MAGIC_PKT |
- WOL_CTL_ENA_MAGIC_PKT_UNIT);
- } else
- sky2_write16(hw, WOL_CTRL_STAT, WOL_CTL_DEFAULT);
-
- return 0;
-}
-#endif
-
static int sky2_get_coalesce(struct net_device *dev,
struct ethtool_coalesce *ecmd)
{
@@ -2878,19 +2822,11 @@ static int sky2_set_coalesce(struct net_device *dev,
{
struct sky2_port *sky2 = netdev_priv(dev);
struct sky2_hw *hw = sky2->hw;
- const u32 tmin = sky2_clk2us(hw, 1);
- const u32 tmax = 5000;
-
- if (ecmd->tx_coalesce_usecs != 0 &&
- (ecmd->tx_coalesce_usecs < tmin || ecmd->tx_coalesce_usecs > tmax))
- return -EINVAL;
-
- if (ecmd->rx_coalesce_usecs != 0 &&
- (ecmd->rx_coalesce_usecs < tmin || ecmd->rx_coalesce_usecs > tmax))
- return -EINVAL;
+ const u32 tmax = sky2_clk2us(hw, 0x0ffffff);
- if (ecmd->rx_coalesce_usecs_irq != 0 &&
- (ecmd->rx_coalesce_usecs_irq < tmin || ecmd->rx_coalesce_usecs_irq > tmax))
+ if (ecmd->tx_coalesce_usecs > tmax ||
+ ecmd->rx_coalesce_usecs > tmax ||
+ ecmd->rx_coalesce_usecs_irq > tmax)
return -EINVAL;
if (ecmd->tx_max_coalesced_frames >= TX_RING_SIZE-1)
@@ -3025,10 +2961,6 @@ static struct ethtool_ops sky2_ethtool_ops = {
.set_ringparam = sky2_set_ringparam,
.get_pauseparam = sky2_get_pauseparam,
.set_pauseparam = sky2_set_pauseparam,
-#ifdef CONFIG_PM
- .get_wol = sky2_get_wol,
- .set_wol = sky2_set_wol,
-#endif
.phys_id = sky2_phys_id,
.get_stats_count = sky2_get_stats_count,
.get_ethtool_stats = sky2_get_ethtool_stats,
@@ -3082,16 +3014,15 @@ static __devinit struct net_device *sky2_init_netdev(struct sky2_hw *hw,
sky2->speed = -1;
sky2->advertising = sky2_supported_modes(hw);
- /* Receive checksum disabled for Yukon XL
+ /* Receive checksum disabled for Yukon XL
* because of observed problems with incorrect
* values when multiple packets are received in one interrupt
*/
sky2->rx_csum = (hw->chip_id != CHIP_ID_YUKON_XL);
- INIT_WORK(&sky2->phy_task, sky2_phy_task, sky2);
- init_MUTEX(&sky2->phy_sema);
+ spin_lock_init(&sky2->phy_lock);
sky2->tx_pending = TX_DEF_PENDING;
- sky2->rx_pending = is_ec_a1(hw) ? 8 : RX_DEF_PENDING;
+ sky2->rx_pending = RX_DEF_PENDING;
sky2->rx_bufsize = sky2_buf_size(ETH_DATA_LEN);
hw->dev[port] = dev;
@@ -3133,6 +3064,66 @@ static void __devinit sky2_show_addr(struct net_device *dev)
dev->dev_addr[3], dev->dev_addr[4], dev->dev_addr[5]);
}
+/* Handle software interrupt used during MSI test */
+static irqreturn_t __devinit sky2_test_intr(int irq, void *dev_id,
+ struct pt_regs *regs)
+{
+ struct sky2_hw *hw = dev_id;
+ u32 status = sky2_read32(hw, B0_Y2_SP_ISRC2);
+
+ if (status == 0)
+ return IRQ_NONE;
+
+ if (status & Y2_IS_IRQ_SW) {
+ hw->msi_detected = 1;
+ wake_up(&hw->msi_wait);
+ sky2_write8(hw, B0_CTST, CS_CL_SW_IRQ);
+ }
+ sky2_write32(hw, B0_Y2_SP_ICR, 2);
+
+ return IRQ_HANDLED;
+}
+
+/* Test interrupt path by forcing a software IRQ */
+static int __devinit sky2_test_msi(struct sky2_hw *hw)
+{
+ struct pci_dev *pdev = hw->pdev;
+ int err;
+
+ sky2_write32(hw, B0_IMSK, Y2_IS_IRQ_SW);
+
+ err = request_irq(pdev->irq, sky2_test_intr, SA_SHIRQ, DRV_NAME, hw);
+ if (err) {
+ printk(KERN_ERR PFX "%s: cannot assign irq %d\n",
+ pci_name(pdev), pdev->irq);
+ return err;
+ }
+
+ init_waitqueue_head(&hw->msi_wait);
+
+ sky2_write8(hw, B0_CTST, CS_ST_SW_IRQ);
+ wmb();
+
+ wait_event_timeout(hw->msi_wait, hw->msi_detected, HZ/10);
+
+ if (!hw->msi_detected) {
+ /* MSI test failed, go back to INTx mode */
+ printk(KERN_WARNING PFX "%s: No interrupt was generated using MSI, "
+ "switching to INTx mode. Please report this failure to "
+ "the PCI maintainer and include system chipset information.\n",
+ pci_name(pdev));
+
+ err = -EOPNOTSUPP;
+ sky2_write8(hw, B0_CTST, CS_CL_SW_IRQ);
+ }
+
+ sky2_write32(hw, B0_IMSK, 0);
+
+ free_irq(pdev->irq, hw);
+
+ return err;
+}
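sky2_test_msi() above follows a common probe-time handshake: raise a software interrupt, wait a bounded time for the handler to flag it, and fall back to legacy INTx if nothing arrives. A rough userspace sketch of that handshake, with a thread standing in for the interrupt handler; none of these names come from the driver:

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>
#include <unistd.h>

/* Stand-in for the MSI test: a thread plays the interrupt handler, the main
 * thread raises the "software IRQ" and waits a bounded time for it to be
 * seen, reporting an "INTx" fallback otherwise. */

static atomic_int sw_irq_raised;	/* analogous to setting CS_ST_SW_IRQ */
static atomic_int irq_detected;		/* analogous to hw->msi_detected */

static void *fake_irq_handler(void *arg)
{
	(void)arg;
	while (!atomic_load(&sw_irq_raised))
		usleep(1000);
	atomic_store(&irq_detected, 1);
	return NULL;
}

int main(void)
{
	pthread_t handler;
	int waited_ms = 0;

	pthread_create(&handler, NULL, fake_irq_handler, NULL);
	atomic_store(&sw_irq_raised, 1);	/* fire the software IRQ */

	while (!atomic_load(&irq_detected) && waited_ms++ < 100)
		usleep(1000);			/* roughly HZ/10 of waiting */

	puts(atomic_load(&irq_detected) ? "MSI path works"
					: "no interrupt seen, fall back to INTx");
	pthread_join(handler, NULL);
	return 0;
}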
+
static int __devinit sky2_probe(struct pci_dev *pdev,
const struct pci_device_id *ent)
{
@@ -3201,7 +3192,6 @@ static int __devinit sky2_probe(struct pci_dev *pdev,
goto err_out_free_hw;
}
hw->pm_cap = pm_cap;
- spin_lock_init(&hw->hw_lock);
#ifdef __BIG_ENDIAN
/* byte swap descriptors in hardware */
@@ -3254,21 +3244,29 @@ static int __devinit sky2_probe(struct pci_dev *pdev,
}
}
- err = request_irq(pdev->irq, sky2_intr, SA_SHIRQ, DRV_NAME, hw);
+ if (!disable_msi && pci_enable_msi(pdev) == 0) {
+ err = sky2_test_msi(hw);
+ if (err == -EOPNOTSUPP)
+ pci_disable_msi(pdev);
+ else if (err)
+ goto err_out_unregister;
+ }
+
+ err = request_irq(pdev->irq, sky2_intr, SA_SHIRQ, DRV_NAME, hw);
if (err) {
printk(KERN_ERR PFX "%s: cannot assign irq %d\n",
pci_name(pdev), pdev->irq);
goto err_out_unregister;
}
- hw->intr_mask = Y2_IS_BASE;
- sky2_write32(hw, B0_IMSK, hw->intr_mask);
+ sky2_write32(hw, B0_IMSK, Y2_IS_BASE);
pci_set_drvdata(pdev, hw);
return 0;
err_out_unregister:
+ pci_disable_msi(pdev);
if (dev1) {
unregister_netdev(dev1);
free_netdev(dev1);
@@ -3311,6 +3309,7 @@ static void __devexit sky2_remove(struct pci_dev *pdev)
sky2_read8(hw, B0_CTST);
free_irq(pdev->irq, hw);
+ pci_disable_msi(pdev);
pci_free_consistent(pdev, STATUS_LE_BYTES, hw->st_le, hw->st_dma);
pci_release_regions(pdev);
pci_disable_device(pdev);
diff --git a/drivers/net/sky2.h b/drivers/net/sky2.h
index dce955c76f3..d63cd5a1b71 100644
--- a/drivers/net/sky2.h
+++ b/drivers/net/sky2.h
@@ -278,13 +278,11 @@ enum {
Y2_IS_CHK_TXS1 = 1<<1, /* Descriptor error TXS 1 */
Y2_IS_CHK_TXA1 = 1<<0, /* Descriptor error TXA 1 */
- Y2_IS_BASE = Y2_IS_HW_ERR | Y2_IS_STAT_BMU |
- Y2_IS_POLL_CHK | Y2_IS_TWSI_RDY |
- Y2_IS_IRQ_SW | Y2_IS_TIMINT,
- Y2_IS_PORT_1 = Y2_IS_IRQ_PHY1 | Y2_IS_IRQ_MAC1 |
- Y2_IS_CHK_RX1 | Y2_IS_CHK_TXA1 | Y2_IS_CHK_TXS1,
- Y2_IS_PORT_2 = Y2_IS_IRQ_PHY2 | Y2_IS_IRQ_MAC2 |
- Y2_IS_CHK_RX2 | Y2_IS_CHK_TXA2 | Y2_IS_CHK_TXS2,
+ Y2_IS_BASE = Y2_IS_HW_ERR | Y2_IS_STAT_BMU,
+ Y2_IS_PORT_1 = Y2_IS_IRQ_PHY1 | Y2_IS_IRQ_MAC1
+ | Y2_IS_CHK_TXA1 | Y2_IS_CHK_RX1,
+ Y2_IS_PORT_2 = Y2_IS_IRQ_PHY2 | Y2_IS_IRQ_MAC2
+ | Y2_IS_CHK_TXA2 | Y2_IS_CHK_RX2,
};
/* B2_IRQM_HWE_MSK 32 bit IRQ Moderation HW Error Mask */
@@ -1832,6 +1830,7 @@ struct sky2_port {
struct net_device *netdev;
unsigned port;
u32 msg_enable;
+ spinlock_t phy_lock;
spinlock_t tx_lock ____cacheline_aligned_in_smp;
struct tx_ring_info *tx_ring;
@@ -1840,7 +1839,6 @@ struct sky2_port {
u16 tx_prod; /* next le to use */
u32 tx_addr64;
u16 tx_pending;
- u16 tx_last_put;
u16 tx_last_mss;
struct ring_info *rx_ring ____cacheline_aligned_in_smp;
@@ -1849,7 +1847,6 @@ struct sky2_port {
u16 rx_next; /* next re to check */
u16 rx_put; /* next le index to use */
u16 rx_pending;
- u16 rx_last_put;
u16 rx_bufsize;
#ifdef SKY2_VLAN_TAG_USED
u16 rx_tag;
@@ -1865,20 +1862,15 @@ struct sky2_port {
u8 rx_pause;
u8 tx_pause;
u8 rx_csum;
- u8 wol;
struct net_device_stats net_stats;
- struct work_struct phy_task;
- struct semaphore phy_sema;
};
struct sky2_hw {
void __iomem *regs;
struct pci_dev *pdev;
struct net_device *dev[2];
- spinlock_t hw_lock;
- u32 intr_mask;
int pm_cap;
u8 chip_id;
@@ -1889,6 +1881,8 @@ struct sky2_hw {
struct sky2_status_le *st_le;
u32 st_idx;
dma_addr_t st_dma;
+ int msi_detected;
+ wait_queue_head_t msi_wait;
};
/* Register accessor for memory mapped device */
diff --git a/drivers/net/smc91x.c b/drivers/net/smc91x.c
index 75e9b3b910c..0e9833adf9f 100644
--- a/drivers/net/smc91x.c
+++ b/drivers/net/smc91x.c
@@ -215,15 +215,12 @@ struct smc_local {
spinlock_t lock;
-#ifdef SMC_CAN_USE_DATACS
- u32 __iomem *datacs;
-#endif
-
#ifdef SMC_USE_PXA_DMA
/* DMA needs the physical address of the chip */
u_long physaddr;
#endif
void __iomem *base;
+ void __iomem *datacs;
};
#if SMC_DEBUG > 0
@@ -2104,9 +2101,8 @@ static int smc_enable_device(struct platform_device *pdev)
* Set the appropriate byte/word mode.
*/
ecsr = readb(addr + (ECSR << SMC_IO_SHIFT)) & ~ECSR_IOIS8;
-#ifndef SMC_CAN_USE_16BIT
- ecsr |= ECSR_IOIS8;
-#endif
+ if (!SMC_CAN_USE_16BIT)
+ ecsr |= ECSR_IOIS8;
writeb(ecsr, addr + (ECSR << SMC_IO_SHIFT));
local_irq_restore(flags);
@@ -2143,40 +2139,39 @@ static void smc_release_attrib(struct platform_device *pdev)
release_mem_region(res->start, ATTRIB_SIZE);
}
-#ifdef SMC_CAN_USE_DATACS
-static void smc_request_datacs(struct platform_device *pdev, struct net_device *ndev)
+static inline void smc_request_datacs(struct platform_device *pdev, struct net_device *ndev)
{
- struct resource * res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "smc91x-data32");
- struct smc_local *lp = netdev_priv(ndev);
+ if (SMC_CAN_USE_DATACS) {
+ struct resource * res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "smc91x-data32");
+ struct smc_local *lp = netdev_priv(ndev);
- if (!res)
- return;
+ if (!res)
+ return;
- if(!request_mem_region(res->start, SMC_DATA_EXTENT, CARDNAME)) {
- printk(KERN_INFO "%s: failed to request datacs memory region.\n", CARDNAME);
- return;
- }
+ if(!request_mem_region(res->start, SMC_DATA_EXTENT, CARDNAME)) {
+ printk(KERN_INFO "%s: failed to request datacs memory region.\n", CARDNAME);
+ return;
+ }
- lp->datacs = ioremap(res->start, SMC_DATA_EXTENT);
+ lp->datacs = ioremap(res->start, SMC_DATA_EXTENT);
+ }
}
static void smc_release_datacs(struct platform_device *pdev, struct net_device *ndev)
{
- struct smc_local *lp = netdev_priv(ndev);
- struct resource * res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "smc91x-data32");
+ if (SMC_CAN_USE_DATACS) {
+ struct smc_local *lp = netdev_priv(ndev);
+ struct resource * res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "smc91x-data32");
- if (lp->datacs)
- iounmap(lp->datacs);
+ if (lp->datacs)
+ iounmap(lp->datacs);
- lp->datacs = NULL;
+ lp->datacs = NULL;
- if (res)
- release_mem_region(res->start, SMC_DATA_EXTENT);
+ if (res)
+ release_mem_region(res->start, SMC_DATA_EXTENT);
+ }
}
-#else
-static void smc_request_datacs(struct platform_device *pdev, struct net_device *ndev) {}
-static void smc_release_datacs(struct platform_device *pdev, struct net_device *ndev) {}
-#endif
/*
* smc_init(void)
diff --git a/drivers/net/smc91x.h b/drivers/net/smc91x.h
index e0efd1964e7..e1be1af5120 100644
--- a/drivers/net/smc91x.h
+++ b/drivers/net/smc91x.h
@@ -275,7 +275,10 @@ SMC_outw(u16 val, void __iomem *ioaddr, int reg)
#define SMC_insw(a,r,p,l) readsw ((void*) ((a) + (r)), p, l)
#define SMC_outw(v,a,r) ({ writew ((v), (a) + (r)); LPD7A40X_IOBARRIER; })
-static inline void SMC_outsw (unsigned long a, int r, unsigned char* p, int l)
+#define SMC_outsw LPD7A40X_SMC_outsw
+
+static inline void LPD7A40X_SMC_outsw(unsigned long a, int r,
+ unsigned char* p, int l)
{
unsigned short* ps = (unsigned short*) p;
while (l-- > 0) {
@@ -342,10 +345,6 @@ static inline void SMC_outsw (unsigned long a, int r, unsigned char* p, int l)
#endif
-#ifndef SMC_IRQ_FLAGS
-#define SMC_IRQ_FLAGS SA_TRIGGER_RISING
-#endif
-
#ifdef SMC_USE_PXA_DMA
/*
* Let's use the DMA engine on the XScale PXA2xx for RX packets. This is
@@ -441,10 +440,85 @@ smc_pxa_dma_irq(int dma, void *dummy, struct pt_regs *regs)
#endif /* SMC_USE_PXA_DMA */
-/* Because of bank switching, the LAN91x uses only 16 I/O ports */
+/*
+ * Everything a particular hardware setup needs should have been defined
+ * at this point. Add stubs for the undefined cases, mainly to avoid
+ * compilation warnings since they'll be optimized away, or to prevent buggy
+ * use of them.
+ */
+
+#if ! SMC_CAN_USE_32BIT
+#define SMC_inl(ioaddr, reg) ({ BUG(); 0; })
+#define SMC_outl(x, ioaddr, reg) BUG()
+#define SMC_insl(a, r, p, l) BUG()
+#define SMC_outsl(a, r, p, l) BUG()
+#endif
+
+#if !defined(SMC_insl) || !defined(SMC_outsl)
+#define SMC_insl(a, r, p, l) BUG()
+#define SMC_outsl(a, r, p, l) BUG()
+#endif
+
+#if ! SMC_CAN_USE_16BIT
+
+/*
+ * Any 16-bit access is performed with two 8-bit accesses if the hardware
+ * can't do it directly. Most registers are 16-bit so those are mandatory.
+ */
+#define SMC_outw(x, ioaddr, reg) \
+ do { \
+ unsigned int __val16 = (x); \
+ SMC_outb( __val16, ioaddr, reg ); \
+ SMC_outb( __val16 >> 8, ioaddr, reg + (1 << SMC_IO_SHIFT));\
+ } while (0)
+#define SMC_inw(ioaddr, reg) \
+ ({ \
+ unsigned int __val16; \
+ __val16 = SMC_inb( ioaddr, reg ); \
+ __val16 |= SMC_inb( ioaddr, reg + (1 << SMC_IO_SHIFT)) << 8; \
+ __val16; \
+ })
+
+#define SMC_insw(a, r, p, l) BUG()
+#define SMC_outsw(a, r, p, l) BUG()
+
+#endif
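The fallback above builds every 16-bit register access out of two 8-bit accesses: the low byte at the register offset, the high byte one (shifted) offset up. A stand-alone C sketch of the same composition against a fake byte-wide register window, assuming SMC_IO_SHIFT is 0; the fake_* helpers are illustrative only:

#include <stdint.h>
#include <stdio.h>

static uint8_t regs[16];	/* fake 8-bit-wide register window */

static void fake_outb(uint8_t v, unsigned reg) { regs[reg] = v; }
static uint8_t fake_inb(unsigned reg)          { return regs[reg]; }

/* 16-bit write as two 8-bit writes: low byte at reg, high byte at reg + 1,
 * mirroring the !SMC_CAN_USE_16BIT fallback above (SMC_IO_SHIFT == 0). */
static void fake_outw(uint16_t v, unsigned reg)
{
	fake_outb(v & 0xff, reg);
	fake_outb(v >> 8, reg + 1);
}

static uint16_t fake_inw(unsigned reg)
{
	return fake_inb(reg) | (uint16_t)fake_inb(reg + 1) << 8;
}

int main(void)
{
	fake_outw(0xbeef, 4);
	printf("read back 0x%04x\n", fake_inw(4));	/* prints 0xbeef */
	return 0;
}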
+
+#if !defined(SMC_insw) || !defined(SMC_outsw)
+#define SMC_insw(a, r, p, l) BUG()
+#define SMC_outsw(a, r, p, l) BUG()
+#endif
+
+#if ! SMC_CAN_USE_8BIT
+#define SMC_inb(ioaddr, reg) ({ BUG(); 0; })
+#define SMC_outb(x, ioaddr, reg) BUG()
+#define SMC_insb(a, r, p, l) BUG()
+#define SMC_outsb(a, r, p, l) BUG()
+#endif
+
+#if !defined(SMC_insb) || !defined(SMC_outsb)
+#define SMC_insb(a, r, p, l) BUG()
+#define SMC_outsb(a, r, p, l) BUG()
+#endif
+
+#ifndef SMC_CAN_USE_DATACS
+#define SMC_CAN_USE_DATACS 0
+#endif
+
#ifndef SMC_IO_SHIFT
#define SMC_IO_SHIFT 0
#endif
+
+#ifndef SMC_IRQ_FLAGS
+#define SMC_IRQ_FLAGS SA_TRIGGER_RISING
+#endif
+
+#ifndef SMC_INTERRUPT_PREAMBLE
+#define SMC_INTERRUPT_PREAMBLE
+#endif
+
+
+/* Because of bank switching, the LAN91x uses only 16 I/O ports */
#define SMC_IO_EXTENT (16 << SMC_IO_SHIFT)
#define SMC_DATA_EXTENT (4)
@@ -817,6 +891,11 @@ static const char * chip_ids[ 16 ] = {
* Note: the following macros do *not* select the bank -- this must
* be done separately as needed in the main code. The SMC_REG() macro
* only uses the bank argument for debugging purposes (when enabled).
+ *
+ * Note: despite inline functions being safer, everything leading to this
+ * should preferably be macros to let BUG() display the line number in
+ * the core source code, since we're interested in the top call site,
+ * not in any inline function location.
*/
#if SMC_DEBUG > 0
@@ -834,62 +913,142 @@ static const char * chip_ids[ 16 ] = {
#define SMC_REG(reg, bank) (reg<<SMC_IO_SHIFT)
#endif
-#if SMC_CAN_USE_8BIT
-#define SMC_GET_PN() SMC_inb( ioaddr, PN_REG )
-#define SMC_SET_PN(x) SMC_outb( x, ioaddr, PN_REG )
-#define SMC_GET_AR() SMC_inb( ioaddr, AR_REG )
-#define SMC_GET_TXFIFO() SMC_inb( ioaddr, TXFIFO_REG )
-#define SMC_GET_RXFIFO() SMC_inb( ioaddr, RXFIFO_REG )
-#define SMC_GET_INT() SMC_inb( ioaddr, INT_REG )
-#define SMC_ACK_INT(x) SMC_outb( x, ioaddr, INT_REG )
-#define SMC_GET_INT_MASK() SMC_inb( ioaddr, IM_REG )
-#define SMC_SET_INT_MASK(x) SMC_outb( x, ioaddr, IM_REG )
-#else
-#define SMC_GET_PN() (SMC_inw( ioaddr, PN_REG ) & 0xFF)
-#define SMC_SET_PN(x) SMC_outw( x, ioaddr, PN_REG )
-#define SMC_GET_AR() (SMC_inw( ioaddr, PN_REG ) >> 8)
-#define SMC_GET_TXFIFO() (SMC_inw( ioaddr, TXFIFO_REG ) & 0xFF)
-#define SMC_GET_RXFIFO() (SMC_inw( ioaddr, TXFIFO_REG ) >> 8)
-#define SMC_GET_INT() (SMC_inw( ioaddr, INT_REG ) & 0xFF)
+/*
+ * Hack Alert: Some setups just can't write 8 or 16 bits reliably when not
+ * aligned to a 32 bit boundary. I tell you that does exist!
+ * Fortunately the affected register accesses can be easily worked around
+ * since we can write zeroes to the preceding 16 bits without adverse
+ * effects and use a 32-bit access.
+ *
+ * Enforce it on any 32-bit capable setup for now.
+ */
+#define SMC_MUST_ALIGN_WRITE SMC_CAN_USE_32BIT
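SMC_MUST_ALIGN_WRITE selects the workaround described in the comment above: instead of a 16-bit store to a half-word register, do one aligned 32-bit store with the value shifted into the upper half-word, so the preceding 16-bit register just receives zeros. A rough stand-alone sketch of that trick, assuming a little-endian host and a made-up register window:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

static uint8_t window[16];	/* fake little-endian register window */

static void fake_outl(uint32_t v, unsigned off)
{
	memcpy(window + off, &v, 4);	/* one aligned 32-bit store */
}

/* Write a 16-bit value to the register at 'off' when only aligned 32-bit
 * stores are reliable: shift it into the upper half-word of the enclosing
 * aligned word, so the preceding 16-bit register gets zeros. */
static void write16_via_aligned32(uint16_t v, unsigned off)
{
	fake_outl((uint32_t)v << 16, off & ~3u);
}

int main(void)
{
	write16_via_aligned32(0x1234, 6);	/* a half-word register at offset 6 */
	printf("reg@6 = 0x%02x%02x, reg@4 zeroed to 0x%02x%02x\n",
	       window[7], window[6], window[5], window[4]);
	return 0;
}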
+
+#define SMC_GET_PN() \
+ ( SMC_CAN_USE_8BIT ? (SMC_inb(ioaddr, PN_REG)) \
+ : (SMC_inw(ioaddr, PN_REG) & 0xFF) )
+
+#define SMC_SET_PN(x) \
+ do { \
+ if (SMC_MUST_ALIGN_WRITE) \
+ SMC_outl((x)<<16, ioaddr, SMC_REG(0, 2)); \
+ else if (SMC_CAN_USE_8BIT) \
+ SMC_outb(x, ioaddr, PN_REG); \
+ else \
+ SMC_outw(x, ioaddr, PN_REG); \
+ } while (0)
+
+#define SMC_GET_AR() \
+ ( SMC_CAN_USE_8BIT ? (SMC_inb(ioaddr, AR_REG)) \
+ : (SMC_inw(ioaddr, PN_REG) >> 8) )
+
+#define SMC_GET_TXFIFO() \
+ ( SMC_CAN_USE_8BIT ? (SMC_inb(ioaddr, TXFIFO_REG)) \
+ : (SMC_inw(ioaddr, TXFIFO_REG) & 0xFF) )
+
+#define SMC_GET_RXFIFO() \
+ ( SMC_CAN_USE_8BIT ? (SMC_inb(ioaddr, RXFIFO_REG)) \
+ : (SMC_inw(ioaddr, TXFIFO_REG) >> 8) )
+
+#define SMC_GET_INT() \
+ ( SMC_CAN_USE_8BIT ? (SMC_inb(ioaddr, INT_REG)) \
+ : (SMC_inw(ioaddr, INT_REG) & 0xFF) )
+
#define SMC_ACK_INT(x) \
do { \
- unsigned long __flags; \
- int __mask; \
- local_irq_save(__flags); \
- __mask = SMC_inw( ioaddr, INT_REG ) & ~0xff; \
- SMC_outw( __mask | (x), ioaddr, INT_REG ); \
- local_irq_restore(__flags); \
+ if (SMC_CAN_USE_8BIT) \
+ SMC_outb(x, ioaddr, INT_REG); \
+ else { \
+ unsigned long __flags; \
+ int __mask; \
+ local_irq_save(__flags); \
+ __mask = SMC_inw( ioaddr, INT_REG ) & ~0xff; \
+ SMC_outw( __mask | (x), ioaddr, INT_REG ); \
+ local_irq_restore(__flags); \
+ } \
+ } while (0)
+
+#define SMC_GET_INT_MASK() \
+ ( SMC_CAN_USE_8BIT ? (SMC_inb(ioaddr, IM_REG)) \
+ : (SMC_inw( ioaddr, INT_REG ) >> 8) )
+
+#define SMC_SET_INT_MASK(x) \
+ do { \
+ if (SMC_CAN_USE_8BIT) \
+ SMC_outb(x, ioaddr, IM_REG); \
+ else \
+ SMC_outw((x) << 8, ioaddr, INT_REG); \
+ } while (0)
+
+#define SMC_CURRENT_BANK() SMC_inw(ioaddr, BANK_SELECT)
+
+#define SMC_SELECT_BANK(x) \
+ do { \
+ if (SMC_MUST_ALIGN_WRITE) \
+ SMC_outl((x)<<16, ioaddr, 12<<SMC_IO_SHIFT); \
+ else \
+ SMC_outw(x, ioaddr, BANK_SELECT); \
+ } while (0)
+
+#define SMC_GET_BASE() SMC_inw(ioaddr, BASE_REG)
+
+#define SMC_SET_BASE(x) SMC_outw(x, ioaddr, BASE_REG)
+
+#define SMC_GET_CONFIG() SMC_inw(ioaddr, CONFIG_REG)
+
+#define SMC_SET_CONFIG(x) SMC_outw(x, ioaddr, CONFIG_REG)
+
+#define SMC_GET_COUNTER() SMC_inw(ioaddr, COUNTER_REG)
+
+#define SMC_GET_CTL() SMC_inw(ioaddr, CTL_REG)
+
+#define SMC_SET_CTL(x) SMC_outw(x, ioaddr, CTL_REG)
+
+#define SMC_GET_MII() SMC_inw(ioaddr, MII_REG)
+
+#define SMC_SET_MII(x) SMC_outw(x, ioaddr, MII_REG)
+
+#define SMC_GET_MIR() SMC_inw(ioaddr, MIR_REG)
+
+#define SMC_SET_MIR(x) SMC_outw(x, ioaddr, MIR_REG)
+
+#define SMC_GET_MMU_CMD() SMC_inw(ioaddr, MMU_CMD_REG)
+
+#define SMC_SET_MMU_CMD(x) SMC_outw(x, ioaddr, MMU_CMD_REG)
+
+#define SMC_GET_FIFO() SMC_inw(ioaddr, FIFO_REG)
+
+#define SMC_GET_PTR() SMC_inw(ioaddr, PTR_REG)
+
+#define SMC_SET_PTR(x) \
+ do { \
+ if (SMC_MUST_ALIGN_WRITE) \
+ SMC_outl((x)<<16, ioaddr, SMC_REG(4, 2)); \
+ else \
+ SMC_outw(x, ioaddr, PTR_REG); \
} while (0)
-#define SMC_GET_INT_MASK() (SMC_inw( ioaddr, INT_REG ) >> 8)
-#define SMC_SET_INT_MASK(x) SMC_outw( (x) << 8, ioaddr, INT_REG )
-#endif
-#define SMC_CURRENT_BANK() SMC_inw( ioaddr, BANK_SELECT )
-#define SMC_SELECT_BANK(x) SMC_outw( x, ioaddr, BANK_SELECT )
-#define SMC_GET_BASE() SMC_inw( ioaddr, BASE_REG )
-#define SMC_SET_BASE(x) SMC_outw( x, ioaddr, BASE_REG )
-#define SMC_GET_CONFIG() SMC_inw( ioaddr, CONFIG_REG )
-#define SMC_SET_CONFIG(x) SMC_outw( x, ioaddr, CONFIG_REG )
-#define SMC_GET_COUNTER() SMC_inw( ioaddr, COUNTER_REG )
-#define SMC_GET_CTL() SMC_inw( ioaddr, CTL_REG )
-#define SMC_SET_CTL(x) SMC_outw( x, ioaddr, CTL_REG )
-#define SMC_GET_MII() SMC_inw( ioaddr, MII_REG )
-#define SMC_SET_MII(x) SMC_outw( x, ioaddr, MII_REG )
-#define SMC_GET_MIR() SMC_inw( ioaddr, MIR_REG )
-#define SMC_SET_MIR(x) SMC_outw( x, ioaddr, MIR_REG )
-#define SMC_GET_MMU_CMD() SMC_inw( ioaddr, MMU_CMD_REG )
-#define SMC_SET_MMU_CMD(x) SMC_outw( x, ioaddr, MMU_CMD_REG )
-#define SMC_GET_FIFO() SMC_inw( ioaddr, FIFO_REG )
-#define SMC_GET_PTR() SMC_inw( ioaddr, PTR_REG )
-#define SMC_SET_PTR(x) SMC_outw( x, ioaddr, PTR_REG )
-#define SMC_GET_EPH_STATUS() SMC_inw( ioaddr, EPH_STATUS_REG )
-#define SMC_GET_RCR() SMC_inw( ioaddr, RCR_REG )
-#define SMC_SET_RCR(x) SMC_outw( x, ioaddr, RCR_REG )
-#define SMC_GET_REV() SMC_inw( ioaddr, REV_REG )
-#define SMC_GET_RPC() SMC_inw( ioaddr, RPC_REG )
-#define SMC_SET_RPC(x) SMC_outw( x, ioaddr, RPC_REG )
-#define SMC_GET_TCR() SMC_inw( ioaddr, TCR_REG )
-#define SMC_SET_TCR(x) SMC_outw( x, ioaddr, TCR_REG )
+#define SMC_GET_EPH_STATUS() SMC_inw(ioaddr, EPH_STATUS_REG)
+
+#define SMC_GET_RCR() SMC_inw(ioaddr, RCR_REG)
+
+#define SMC_SET_RCR(x) SMC_outw(x, ioaddr, RCR_REG)
+
+#define SMC_GET_REV() SMC_inw(ioaddr, REV_REG)
+
+#define SMC_GET_RPC() SMC_inw(ioaddr, RPC_REG)
+
+#define SMC_SET_RPC(x) \
+ do { \
+ if (SMC_MUST_ALIGN_WRITE) \
+ SMC_outl((x)<<16, ioaddr, SMC_REG(8, 0)); \
+ else \
+ SMC_outw(x, ioaddr, RPC_REG); \
+ } while (0)
+
+#define SMC_GET_TCR() SMC_inw(ioaddr, TCR_REG)
+
+#define SMC_SET_TCR(x) SMC_outw(x, ioaddr, TCR_REG)
#ifndef SMC_GET_MAC_ADDR
#define SMC_GET_MAC_ADDR(addr) \
@@ -920,151 +1079,84 @@ static const char * chip_ids[ 16 ] = {
SMC_outw( mt[6] | (mt[7] << 8), ioaddr, MCAST_REG4 ); \
} while (0)
-#if SMC_CAN_USE_32BIT
-/*
- * Some setups just can't write 8 or 16 bits reliably when not aligned
- * to a 32 bit boundary. I tell you that exists!
- * We re-do the ones here that can be easily worked around if they can have
- * their low parts written to 0 without adverse effects.
- */
-#undef SMC_SELECT_BANK
-#define SMC_SELECT_BANK(x) SMC_outl( (x)<<16, ioaddr, 12<<SMC_IO_SHIFT )
-#undef SMC_SET_RPC
-#define SMC_SET_RPC(x) SMC_outl( (x)<<16, ioaddr, SMC_REG(8, 0) )
-#undef SMC_SET_PN
-#define SMC_SET_PN(x) SMC_outl( (x)<<16, ioaddr, SMC_REG(0, 2) )
-#undef SMC_SET_PTR
-#define SMC_SET_PTR(x) SMC_outl( (x)<<16, ioaddr, SMC_REG(4, 2) )
-#endif
-
-#if SMC_CAN_USE_32BIT
-#define SMC_PUT_PKT_HDR(status, length) \
- SMC_outl( (status) | (length) << 16, ioaddr, DATA_REG )
-#define SMC_GET_PKT_HDR(status, length) \
- do { \
- unsigned int __val = SMC_inl( ioaddr, DATA_REG ); \
- (status) = __val & 0xffff; \
- (length) = __val >> 16; \
- } while (0)
-#else
#define SMC_PUT_PKT_HDR(status, length) \
do { \
- SMC_outw( status, ioaddr, DATA_REG ); \
- SMC_outw( length, ioaddr, DATA_REG ); \
- } while (0)
-#define SMC_GET_PKT_HDR(status, length) \
- do { \
- (status) = SMC_inw( ioaddr, DATA_REG ); \
- (length) = SMC_inw( ioaddr, DATA_REG ); \
+ if (SMC_CAN_USE_32BIT) \
+ SMC_outl((status) | (length)<<16, ioaddr, DATA_REG); \
+ else { \
+ SMC_outw(status, ioaddr, DATA_REG); \
+ SMC_outw(length, ioaddr, DATA_REG); \
+ } \
} while (0)
-#endif
-#if SMC_CAN_USE_32BIT
-#define _SMC_PUSH_DATA(p, l) \
+#define SMC_GET_PKT_HDR(status, length) \
do { \
- char *__ptr = (p); \
- int __len = (l); \
- if (__len >= 2 && (unsigned long)__ptr & 2) { \
- __len -= 2; \
- SMC_outw( *(u16 *)__ptr, ioaddr, DATA_REG ); \
- __ptr += 2; \
- } \
- SMC_outsl( ioaddr, DATA_REG, __ptr, __len >> 2); \
- if (__len & 2) { \
- __ptr += (__len & ~3); \
- SMC_outw( *((u16 *)__ptr), ioaddr, DATA_REG ); \
+ if (SMC_CAN_USE_32BIT) { \
+ unsigned int __val = SMC_inl(ioaddr, DATA_REG); \
+ (status) = __val & 0xffff; \
+ (length) = __val >> 16; \
+ } else { \
+ (status) = SMC_inw(ioaddr, DATA_REG); \
+ (length) = SMC_inw(ioaddr, DATA_REG); \
} \
} while (0)
-#define _SMC_PULL_DATA(p, l) \
- do { \
- char *__ptr = (p); \
- int __len = (l); \
- if ((unsigned long)__ptr & 2) { \
- /* \
- * We want 32bit alignment here. \
- * Since some buses perform a full 32bit \
- * fetch even for 16bit data we can't use \
- * SMC_inw() here. Back both source (on chip \
- * and destination) pointers of 2 bytes. \
- */ \
- __ptr -= 2; \
- __len += 2; \
- SMC_SET_PTR( 2|PTR_READ|PTR_RCV|PTR_AUTOINC ); \
- } \
- __len += 2; \
- SMC_insl( ioaddr, DATA_REG, __ptr, __len >> 2); \
- } while (0)
-#elif SMC_CAN_USE_16BIT
-#define _SMC_PUSH_DATA(p, l) SMC_outsw( ioaddr, DATA_REG, p, (l) >> 1 )
-#define _SMC_PULL_DATA(p, l) SMC_insw ( ioaddr, DATA_REG, p, (l) >> 1 )
-#elif SMC_CAN_USE_8BIT
-#define _SMC_PUSH_DATA(p, l) SMC_outsb( ioaddr, DATA_REG, p, l )
-#define _SMC_PULL_DATA(p, l) SMC_insb ( ioaddr, DATA_REG, p, l )
-#endif
-#if ! SMC_CAN_USE_16BIT
-#define SMC_outw(x, ioaddr, reg) \
+#define SMC_PUSH_DATA(p, l) \
do { \
- unsigned int __val16 = (x); \
- SMC_outb( __val16, ioaddr, reg ); \
- SMC_outb( __val16 >> 8, ioaddr, reg + (1 << SMC_IO_SHIFT));\
+ if (SMC_CAN_USE_32BIT) { \
+ void *__ptr = (p); \
+ int __len = (l); \
+ void *__ioaddr = ioaddr; \
+ if (__len >= 2 && (unsigned long)__ptr & 2) { \
+ __len -= 2; \
+ SMC_outw(*(u16 *)__ptr, ioaddr, DATA_REG); \
+ __ptr += 2; \
+ } \
+ if (SMC_CAN_USE_DATACS && lp->datacs) \
+ __ioaddr = lp->datacs; \
+ SMC_outsl(__ioaddr, DATA_REG, __ptr, __len>>2); \
+ if (__len & 2) { \
+ __ptr += (__len & ~3); \
+ SMC_outw(*((u16 *)__ptr), ioaddr, DATA_REG); \
+ } \
+ } else if (SMC_CAN_USE_16BIT) \
+ SMC_outsw(ioaddr, DATA_REG, p, (l) >> 1); \
+ else if (SMC_CAN_USE_8BIT) \
+ SMC_outsb(ioaddr, DATA_REG, p, l); \
} while (0)
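The 32-bit branch of SMC_PUSH_DATA above copes with a buffer that starts on a half-word boundary by emitting one leading 16-bit store, streaming the aligned middle as 32-bit stores, and flushing any trailing half-word at the end. A self-contained C sketch of that split, writing into an in-memory FIFO instead of the chip; all names here are illustrative:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

static uint8_t fifo[256];
static unsigned fifo_pos;

static void fifo_out16(uint16_t v) { memcpy(fifo + fifo_pos, &v, 2); fifo_pos += 2; }
static void fifo_out32(uint32_t v) { memcpy(fifo + fifo_pos, &v, 4); fifo_pos += 4; }

/* Push 'len' bytes (assumed even, as on the chip) using 32-bit stores for
 * the aligned middle and 16-bit stores for a misaligned head/tail, mirroring
 * the 32-bit branch of SMC_PUSH_DATA above. */
static void push_data(const void *buf, int len)
{
	const uint8_t *p = buf;

	if (len >= 2 && (uintptr_t)p & 2) {	/* leading half-word */
		uint16_t v;
		memcpy(&v, p, 2);
		fifo_out16(v);
		p += 2;
		len -= 2;
	}
	for (int i = 0; i < len >> 2; i++) {	/* aligned 32-bit bulk */
		uint32_t v;
		memcpy(&v, p + 4 * i, 4);
		fifo_out32(v);
	}
	if (len & 2) {				/* trailing half-word */
		uint16_t v;
		memcpy(&v, p + (len & ~3), 2);
		fifo_out16(v);
	}
}

int main(void)
{
	static _Alignas(4) const uint8_t pkt[16] = "example-frame!!";

	push_data(pkt + 2, 12);	/* start 2 bytes in to exercise the head case */
	printf("pushed %u bytes\n", fifo_pos);
	return 0;
}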
-#define SMC_inw(ioaddr, reg) \
- ({ \
- unsigned int __val16; \
- __val16 = SMC_inb( ioaddr, reg ); \
- __val16 |= SMC_inb( ioaddr, reg + (1 << SMC_IO_SHIFT)) << 8; \
- __val16; \
- })
-#endif
-
-#ifdef SMC_CAN_USE_DATACS
-#define SMC_PUSH_DATA(p, l) \
- if ( lp->datacs ) { \
- unsigned char *__ptr = (p); \
- int __len = (l); \
- if (__len >= 2 && (unsigned long)__ptr & 2) { \
- __len -= 2; \
- SMC_outw( *((u16 *)__ptr), ioaddr, DATA_REG ); \
- __ptr += 2; \
- } \
- outsl(lp->datacs, __ptr, __len >> 2); \
- if (__len & 2) { \
- __ptr += (__len & ~3); \
- SMC_outw( *((u16 *)__ptr), ioaddr, DATA_REG ); \
- } \
- } else { \
- _SMC_PUSH_DATA(p, l); \
- }
#define SMC_PULL_DATA(p, l) \
- if ( lp->datacs ) { \
- unsigned char *__ptr = (p); \
- int __len = (l); \
- if ((unsigned long)__ptr & 2) { \
- /* \
- * We want 32bit alignment here. \
- * Since some buses perform a full 32bit \
- * fetch even for 16bit data we can't use \
- * SMC_inw() here. Back both source (on chip \
- * and destination) pointers of 2 bytes. \
- */ \
- __ptr -= 2; \
+ do { \
+ if (SMC_CAN_USE_32BIT) { \
+ void *__ptr = (p); \
+ int __len = (l); \
+ void *__ioaddr = ioaddr; \
+ if ((unsigned long)__ptr & 2) { \
+ /* \
+ * We want 32bit alignment here. \
+ * Since some buses perform a full \
+ * 32bit fetch even for 16bit data \
+ * we can't use SMC_inw() here. \
+ * Back both source (on-chip) and \
+ * destination pointers by 2 bytes. \
+ * This is possible since the call to \
+ * SMC_GET_PKT_HDR() already advanced \
+ * the source pointer by 4 bytes, and \
+ * the skb_reserve(skb, 2) advanced \
+ * the destination pointer by 2 bytes. \
+ */ \
+ __ptr -= 2; \
+ __len += 2; \
+ SMC_SET_PTR(2|PTR_READ|PTR_RCV|PTR_AUTOINC); \
+ } \
+ if (SMC_CAN_USE_DATACS && lp->datacs) \
+ __ioaddr = lp->datacs; \
__len += 2; \
- SMC_SET_PTR( 2|PTR_READ|PTR_RCV|PTR_AUTOINC ); \
- } \
- __len += 2; \
- insl( lp->datacs, __ptr, __len >> 2); \
- } else { \
- _SMC_PULL_DATA(p, l); \
- }
-#else
-#define SMC_PUSH_DATA(p, l) _SMC_PUSH_DATA(p, l)
-#define SMC_PULL_DATA(p, l) _SMC_PULL_DATA(p, l)
-#endif
-
-#if !defined (SMC_INTERRUPT_PREAMBLE)
-# define SMC_INTERRUPT_PREAMBLE
-#endif
+ SMC_insl(__ioaddr, DATA_REG, __ptr, __len>>2); \
+ } else if (SMC_CAN_USE_16BIT) \
+ SMC_insw(ioaddr, DATA_REG, p, (l) >> 1); \
+ else if (SMC_CAN_USE_8BIT) \
+ SMC_insb(ioaddr, DATA_REG, p, l); \
+ } while (0)
#endif /* _SMC91X_H_ */
diff --git a/drivers/net/tg3.c b/drivers/net/tg3.c
index e03d1ae50c3..88829eb9568 100644
--- a/drivers/net/tg3.c
+++ b/drivers/net/tg3.c
@@ -69,8 +69,8 @@
#define DRV_MODULE_NAME "tg3"
#define PFX DRV_MODULE_NAME ": "
-#define DRV_MODULE_VERSION "3.52"
-#define DRV_MODULE_RELDATE "Mar 06, 2006"
+#define DRV_MODULE_VERSION "3.53"
+#define DRV_MODULE_RELDATE "Mar 22, 2006"
#define TG3_DEF_MAC_MODE 0
#define TG3_DEF_RX_MODE 0
@@ -1148,6 +1148,19 @@ static int tg3_halt_cpu(struct tg3 *, u32);
static int tg3_nvram_lock(struct tg3 *);
static void tg3_nvram_unlock(struct tg3 *);
+static void tg3_power_down_phy(struct tg3 *tp)
+{
+ /* The PHY should not be powered down on some chips because
+ * of bugs.
+ */
+ if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
+ GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
+ (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 &&
+ (tp->tg3_flags2 & TG3_FLG2_MII_SERDES)))
+ return;
+ tg3_writephy(tp, MII_BMCR, BMCR_PDOWN);
+}
+
static int tg3_set_power_state(struct tg3 *tp, pci_power_t state)
{
u32 misc_host_ctrl;
@@ -1327,8 +1340,7 @@ static int tg3_set_power_state(struct tg3 *tp, pci_power_t state)
tg3_writephy(tp, MII_TG3_EXT_CTRL,
MII_TG3_EXT_CTRL_FORCE_LED_OFF);
tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x01b2);
- if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700)
- tg3_writephy(tp, MII_BMCR, BMCR_PDOWN);
+ tg3_power_down_phy(tp);
}
}
@@ -9436,12 +9448,18 @@ static inline struct subsys_tbl_ent *lookup_by_subsys(struct tg3 *tp)
return NULL;
}
-/* Since this function may be called in D3-hot power state during
- * tg3_init_one(), only config cycles are allowed.
- */
static void __devinit tg3_get_eeprom_hw_cfg(struct tg3 *tp)
{
u32 val;
+ u16 pmcsr;
+
+ /* On some early chips the SRAM cannot be accessed in D3hot state,
+ * so we need to make sure we're in D0.
+ */
+ pci_read_config_word(tp->pdev, tp->pm_cap + PCI_PM_CTRL, &pmcsr);
+ pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
+ pci_write_config_word(tp->pdev, tp->pm_cap + PCI_PM_CTRL, pmcsr);
+ msleep(1);
/* Make sure register accesses (indirect or otherwise)
* will function correctly.